code stringlengths 17 6.64M |
|---|
class PointGenerator(object):
    """Generates per-cell point locations (and validity masks) for a
    feature map of a given stride.
    """

    def _meshgrid(self, x, y, row_major=True):
        """Build a flattened 2-D mesh from two 1-D tensors.

        Returns the coordinate pair for every cell, in row-major order
        unless ``row_major`` is False.
        """
        grid_x = x.repeat(len(y))
        grid_y = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (grid_x, grid_y) if row_major else (grid_y, grid_x)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Return an (N, 3) tensor of (x, y, stride) for each feature cell."""
        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0.0, feat_w, device=device) * stride
        shift_y = torch.arange(0.0, feat_h, device=device) * stride
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        # One stride entry per point so downstream code can recover scale.
        stride_col = shift_x.new_full((shift_xx.shape[0],), stride)
        points = torch.stack([shift_xx, shift_yy, stride_col], dim=-1)
        return points.to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Return a uint8 mask marking points inside the valid region."""
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        return valid_xx & valid_yy
|
def build_assigner(cfg, **kwargs):
    """Build a bbox assigner from a config dict, or pass through an
    already-built assigner instance.

    Args:
        cfg (dict | BaseAssigner): assigner config or assigner instance.
        **kwargs: default arguments forwarded to the assigner constructor.

    Returns:
        BaseAssigner: the (possibly newly constructed) assigner.

    Raises:
        TypeError: if ``cfg`` is neither a dict nor a ``BaseAssigner``.
    """
    if isinstance(cfg, assigners.BaseAssigner):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
    else:
        # Bug fix: the original message said "sampler" (copy-paste from
        # build_sampler), which made error reports misleading.
        raise TypeError('Invalid type {} for building an assigner'.format(type(cfg)))
|
def build_sampler(cfg, **kwargs):
    """Build a bbox sampler from a config dict, or return ``cfg`` unchanged
    when it is already a sampler instance.

    Raises:
        TypeError: if ``cfg`` is neither a dict nor a ``BaseSampler``.
    """
    if isinstance(cfg, samplers.BaseSampler):
        return cfg
    if isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
    raise TypeError('Invalid type {} for building a sampler'.format(type(cfg)))
|
def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
    """Assign proposals to ground truth and sample them in one step.

    Args:
        bboxes (Tensor): candidate boxes to assign and sample from.
        gt_bboxes (Tensor): ground-truth boxes.
        gt_bboxes_ignore (Tensor | None): boxes to be ignored by the assigner.
        gt_labels (Tensor | None): labels of the ground-truth boxes.
        cfg: config object providing ``cfg.assigner`` and ``cfg.sampler``.

    Returns:
        tuple: (assign_result, sampling_result).
    """
    bbox_assigner = build_assigner(cfg.assigner)
    bbox_sampler = build_sampler(cfg.sampler)
    assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels)
    # NOTE: gt_bboxes_ignore is consumed by the assigner only; the sampler
    # works purely from the resulting assignment.
    sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    return (assign_result, sampling_result)
|
class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing
            this assignment

        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1
            means ignore.

        max_overlaps (FloatTensor): the iou between the predicted box and
            its assigned truth box.

        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
            labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
            labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels

    @property
    def num_preds(self):
        """Return the number of predictions in this assignment."""
        return len(self.gt_inds)

    @property
    def info(self):
        """Returns a dictionary of info about the object."""
        return {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }

    def __nice__(self):
        """Create a "nice" summary string describing this assign result."""
        parts = []
        parts.append('num_gts={!r}'.format(self.num_gts))
        if self.gt_inds is None:
            parts.append('gt_inds={!r}'.format(self.gt_inds))
        else:
            parts.append('gt_inds.shape={!r}'.format(
                tuple(self.gt_inds.shape)))
        if self.max_overlaps is None:
            parts.append('max_overlaps={!r}'.format(self.max_overlaps))
        else:
            parts.append('max_overlaps.shape={!r}'.format(
                tuple(self.max_overlaps.shape)))
        if self.labels is None:
            parts.append('labels={!r}'.format(self.labels))
        else:
            parts.append('labels.shape={!r}'.format(tuple(self.labels.shape)))
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Kwargs:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            num_classes (int): number of possible category labels
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            AssignResult :

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # Bug fix: the original read kwargs['p_use_label'] here, so
        # 'num_classes' could never actually be configured by callers.
        num_classes = kwargs.get('num_classes', 3)
        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)
        if num_gts == 0:
            # No truth boxes: everything is unassigned with zero overlap.
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np
            max_overlaps = torch.from_numpy(rng.rand(num_preds))
            # Choose which predictions are assigned, capped by both the
            # number of preds and the number of gts.
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = (torch.from_numpy(rng.rand(num_preds) < p_ignore)
                         & is_assigned)
            # Each assigned prediction gets a random 1-based gt index.
            # (The original built a shuffled gt_inds first and then
            # immediately overwrote it — dead stores, removed here.)
            gt_inds = torch.from_numpy(
                rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0
            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(
                        rng.randint(1, num_classes + 1, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Prepend ground-truth boxes to this result as self-assignments.

        Each gt is assigned to itself (1-based index) with overlap 1.0;
        labels, if tracked, are extended with ``gt_labels``.
        """
        self_inds = torch.arange(
            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])
        self.max_overlaps = torch.cat(
            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
|
class BaseAssigner(metaclass=ABCMeta):
    """Abstract interface for bbox assigners.

    Subclasses must implement :meth:`assign`, which matches each candidate
    box to a ground-truth box (or marks it unassigned / ignored).
    """

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        # Concrete implementations return an AssignResult.
        pass
|
class CombinedSampler(BaseSampler):
    """Sampler that delegates positive and negative sampling to two
    separately configured sub-samplers.
    """

    def __init__(self, pos_sampler, neg_sampler, **kwargs):
        super(CombinedSampler, self).__init__(**kwargs)
        # Build each half from its own config; shared kwargs are forwarded
        # to both constructors.
        self.pos_sampler = build_sampler(pos_sampler, **kwargs)
        self.neg_sampler = build_sampler(neg_sampler, **kwargs)

    def _sample_pos(self, **kwargs):
        """Not used directly; positive sampling lives in ``pos_sampler``."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Not used directly; negative sampling lives in ``neg_sampler``."""
        raise NotImplementedError
|
class InstanceBalancedPosSampler(RandomSampler):
    """Random positive sampler that balances samples across gt instances.

    Instead of sampling positives uniformly, it tries to take roughly the
    same number of positive proposals for every ground-truth box.
    """

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample up to ``num_expected`` positives, balanced per gt.

        Args:
            assign_result (AssignResult): assignment of proposals to gts.
            num_expected (int): maximum number of positives to sample.

        Returns:
            Tensor: indices of the sampled positive proposals.
        """
        pos_inds = torch.nonzero((assign_result.gt_inds > 0))
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            # Fewer positives than requested: keep them all.
            return pos_inds
        else:
            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
            num_gts = len(unique_gt_inds)
            # Per-gt quota; +1 so rounding never undershoots the total.
            num_per_gt = int((round((num_expected / float(num_gts))) + 1))
            sampled_inds = []
            for i in unique_gt_inds:
                inds = torch.nonzero((assign_result.gt_inds == i.item()))
                if (inds.numel() != 0):
                    inds = inds.squeeze(1)
                else:
                    continue
                if (len(inds) > num_per_gt):
                    inds = self.random_choice(inds, num_per_gt)
                sampled_inds.append(inds)
            sampled_inds = torch.cat(sampled_inds)
            if (len(sampled_inds) < num_expected):
                # Quotas fell short: top up with leftover positives.
                num_extra = (num_expected - len(sampled_inds))
                extra_inds = np.array(list((set(pos_inds.cpu()) - set(sampled_inds.cpu()))))
                if (len(extra_inds) > num_extra):
                    extra_inds = self.random_choice(extra_inds, num_extra)
                extra_inds = torch.from_numpy(extra_inds).to(assign_result.gt_inds.device).long()
                sampled_inds = torch.cat([sampled_inds, extra_inds])
            elif (len(sampled_inds) > num_expected):
                # Quotas overshot: subsample down to the requested count.
                sampled_inds = self.random_choice(sampled_inds, num_expected)
            return sampled_inds
|
class IoUBalancedNegSampler(RandomSampler):
    """IoU Balanced Sampling.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Sampling proposals according to their IoU. `floor_fraction` of needed
    RoIs are sampled from proposals whose IoU are lower than `floor_thr`
    randomly. The others are sampled from proposals whose IoU are higher
    than `floor_thr`. These proposals are sampled from some bins evenly,
    which are split by `num_bins` via IoU evenly.

    Args:
        num (int): number of proposals.
        pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced
            sampling, set to -1 if all using IoU balanced sampling.
        floor_fraction (float): sampling fraction of proposals under
            floor_thr.
        num_bins (int): number of bins in IoU balanced sampling.
    """

    def __init__(self, num, pos_fraction, floor_thr=(- 1), floor_fraction=0, num_bins=3, **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs)
        assert ((floor_thr >= 0) or (floor_thr == (- 1)))
        assert (0 <= floor_fraction <= 1)
        assert (num_bins >= 1)
        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample evenly from ``num_bins`` equal-width IoU intervals.

        Args:
            max_overlaps (ndarray): per-proposal max IoU with any gt.
            full_set (set[int]): candidate proposal indices.
            num_expected (int): total number of samples wanted.

        Returns:
            ndarray: sampled proposal indices.
        """
        max_iou = max_overlaps.max()
        iou_interval = ((max_iou - self.floor_thr) / self.num_bins)
        per_num_expected = int((num_expected / self.num_bins))
        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = (self.floor_thr + (i * iou_interval))
            end_iou = (self.floor_thr + ((i + 1) * iou_interval))
            tmp_set = set(np.where(np.logical_and((max_overlaps >= start_iou), (max_overlaps < end_iou)))[0])
            tmp_inds = list((tmp_set & full_set))
            if (len(tmp_inds) > per_num_expected):
                tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected)
            else:
                # Bug fix: np.int (removed in NumPy 1.24) -> np.int64,
                # the same dtype the old alias resolved to on 64-bit.
                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
            sampled_inds.append(tmp_sampled_set)
        sampled_inds = np.concatenate(sampled_inds)
        if (len(sampled_inds) < num_expected):
            # Bins fell short: top up randomly from the remaining pool.
            num_extra = (num_expected - len(sampled_inds))
            extra_inds = np.array(list((full_set - set(sampled_inds))))
            if (len(extra_inds) > num_extra):
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])
        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negatives, IoU-balanced above ``floor_thr``."""
        neg_inds = torch.nonzero((assign_result.gt_inds == 0))
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            neg_set = set(neg_inds.cpu().numpy())
            if (self.floor_thr > 0):
                floor_set = set(np.where(np.logical_and((max_overlaps >= 0), (max_overlaps < self.floor_thr)))[0])
                iou_sampling_set = set(np.where((max_overlaps >= self.floor_thr))[0])
            elif (self.floor_thr == 0):
                floor_set = set(np.where((max_overlaps == 0))[0])
                iou_sampling_set = set(np.where((max_overlaps > self.floor_thr))[0])
            else:
                # floor_thr == -1: everything goes through IoU-balanced
                # sampling; NOTE this also resets floor_thr to 0 so that
                # sample_via_interval uses 0 as the interval origin.
                floor_set = set()
                iou_sampling_set = set(np.where((max_overlaps > self.floor_thr))[0])
                self.floor_thr = 0
            floor_neg_inds = list((floor_set & neg_set))
            iou_sampling_neg_inds = list((iou_sampling_set & neg_set))
            num_expected_iou_sampling = int((num_expected * (1 - self.floor_fraction)))
            if (len(iou_sampling_neg_inds) > num_expected_iou_sampling):
                if (self.num_bins >= 2):
                    iou_sampled_inds = self.sample_via_interval(max_overlaps, set(iou_sampling_neg_inds), num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                # Bug fix: np.int -> np.int64 (alias removed in NumPy 1.24).
                iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=np.int64)
            num_expected_floor = (num_expected - len(iou_sampled_inds))
            if (len(floor_neg_inds) > num_expected_floor):
                sampled_floor_inds = self.random_choice(floor_neg_inds, num_expected_floor)
            else:
                # Bug fix: np.int -> np.int64 (alias removed in NumPy 1.24).
                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
            sampled_inds = np.concatenate((sampled_floor_inds, iou_sampled_inds))
            if (len(sampled_inds) < num_expected):
                num_extra = (num_expected - len(sampled_inds))
                extra_inds = np.array(list((neg_set - set(sampled_inds))))
                if (len(extra_inds) > num_extra):
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(assign_result.gt_inds.device)
            return sampled_inds
|
class OHEMSampler(BaseSampler):
    """Online Hard Example Mining Sampler described in [1]_.

    Ranks proposals by their classification loss and keeps the hardest
    ones instead of sampling randomly.

    References:
        .. [1] https://arxiv.org/pdf/1604.03540.pdf
    """

    def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1), add_gt_as_proposals=True, **kwargs):
        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
        # `context` is the detector that owns the bbox head; its loss is
        # reused to rank proposals by difficulty.
        if (not hasattr(context, 'num_stages')):
            self.bbox_roi_extractor = context.bbox_roi_extractor
            self.bbox_head = context.bbox_head
        else:
            # Cascade detector: use the extractor/head of the current stage.
            self.bbox_roi_extractor = context.bbox_roi_extractor[context.current_stage]
            self.bbox_head = context.bbox_head[context.current_stage]

    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        """Return the ``num_expected`` indices with the largest cls loss."""
        with torch.no_grad():
            rois = bbox2roi([bboxes])
            bbox_feats = self.bbox_roi_extractor(feats[:self.bbox_roi_extractor.num_inputs], rois)
            (cls_score, _) = self.bbox_head(bbox_feats)
            # Unreduced per-sample classification loss ranks difficulty.
            loss = self.bbox_head.loss(cls_score=cls_score, bbox_pred=None, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls']
            (_, topk_loss_inds) = loss.topk(num_expected)
        return inds[topk_loss_inds]

    def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
        """Hard-mine positives when there are more than ``num_expected``."""
        pos_inds = torch.nonzero((assign_result.gt_inds > 0))
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            return pos_inds
        else:
            return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats)

    def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
        """Hard-mine negatives when there are more than ``num_expected``."""
        neg_inds = torch.nonzero((assign_result.gt_inds == 0))
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], assign_result.labels[neg_inds], feats)
|
class PseudoSampler(BaseSampler):
    """A no-op sampler: keeps every assigned positive and every negative,
    for cases where no subsampling is wanted.
    """

    def __init__(self, **kwargs):
        # Intentionally skips BaseSampler.__init__: no sampling params.
        pass

    def _sample_pos(self, **kwargs):
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Wrap all positives and all negatives into a SamplingResult."""
        positive = torch.nonzero(assign_result.gt_inds > 0).squeeze(-1).unique()
        negative = torch.nonzero(assign_result.gt_inds == 0).squeeze(-1).unique()
        no_gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(positive, negative, bboxes, gt_bboxes,
                              assign_result, no_gt_flags)
|
class RandomSampler(BaseSampler):
    """Sampler that picks positives and negatives uniformly at random."""

    def __init__(self, num, pos_fraction, neg_pos_ub=(- 1), add_gt_as_proposals=True, **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
        # NOTE: this rng seeds numpy-side helpers only; random_choice below
        # draws from torch's global generator.
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Random select some elements from the gallery.

        If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is a ndarray or list, the returned indices will be a
        ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num
        is_tensor = isinstance(gallery, torch.Tensor)
        if not is_tensor:
            # Bug fix: the original always used torch.cuda.current_device(),
            # which raises on CPU-only machines. Fall back to CPU when CUDA
            # is unavailable.
            if torch.cuda.is_available():
                device = torch.cuda.current_device()
            else:
                device = 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
        rand_inds = gallery[perm]
        if not is_tensor:
            rand_inds = rand_inds.cpu().numpy()
        return rand_inds

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = torch.nonzero((assign_result.gt_inds > 0))
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        neg_inds = torch.nonzero((assign_result.gt_inds == 0))
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            return self.random_choice(neg_inds, num_expected)
|
def wider_face_classes():
    """Class names of the WIDER FACE dataset (single 'face' category)."""
    return ['face']
|
def voc_classes():
    """Class names of PASCAL VOC (20 categories, canonical order)."""
    names = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
             'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
             'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
             'tvmonitor')
    return list(names)
|
def imagenet_det_classes():
    """Class names of the ImageNet DET challenge, in canonical order."""
    return ['accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 
'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra']
|
def imagenet_vid_classes():
    """Class names of the ImageNet VID challenge (30 categories)."""
    names = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',
             'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox',
             'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',
             'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake',
             'squirrel', 'tiger', 'train', 'turtle', 'watercraft', 'whale',
             'zebra')
    return list(names)
|
def coco_classes():
    """Class names of MS COCO (80 categories, canonical order)."""
    names = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
             'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
             'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
             'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
             'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
             'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball',
             'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
             'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup',
             'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
             'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog',
             'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant',
             'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse',
             'remote', 'keyboard', 'cell_phone', 'microwave', 'oven',
             'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
             'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')
    return list(names)
|
def cityscapes_classes():
    """Class names of the Cityscapes detection benchmark (8 categories)."""
    names = ('person', 'rider', 'car', 'truck', 'bus', 'train',
             'motorcycle', 'bicycle')
    return list(names)
|
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): dataset name or one of its registered aliases
            (looked up in the module-level ``dataset_aliases``).

    Returns:
        list[str]: class names of the dataset.

    Raises:
        ValueError: if the string is not a known dataset or alias.
        TypeError: if ``dataset`` is not a string.
    """
    alias2name = {}
    for (name, aliases) in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if (dataset in alias2name):
            # Resolve '<name>_classes' at module level explicitly instead
            # of eval() — same lookup, no arbitrary-code-execution surface.
            labels = globals()[alias2name[dataset] + '_classes']()
        else:
            raise ValueError('Unrecognized dataset: {}'.format(dataset))
    else:
        raise TypeError('dataset must a str, but got {}'.format(type(dataset)))
    return labels
|
class EvalHook(Hook):
    """Evaluation hook run every ``interval`` training epochs.

    Attributes:
        dataloader (DataLoader): A PyTorch dataloader.
        interval (int): Evaluation interval (by epochs). Default: 1.
    """

    def __init__(self, dataloader, interval=1, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
        self.dataloader = dataloader
        self.interval = interval
        self.eval_kwargs = eval_kwargs

    def after_train_epoch(self, runner):
        if not self.every_n_epochs(runner, self.interval):
            return
        # Imported lazily to avoid a circular import at module load time.
        from mmdet.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        self.evaluate(runner, results)

    def evaluate(self, runner, results):
        """Run the dataset's evaluate() and push metrics to the log buffer."""
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs)
        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True
|
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    Attributes:
        dataloader (DataLoader): A PyTorch dataloader.
        interval (int): Evaluation interval (by epochs). Default: 1.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
    """

    def __init__(self, dataloader, interval=1, gpu_collect=False, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
        self.dataloader = dataloader
        self.interval = interval
        self.gpu_collect = gpu_collect
        self.eval_kwargs = eval_kwargs

    def after_train_epoch(self, runner):
        if not self.every_n_epochs(runner, self.interval):
            return
        # Imported lazily to avoid a circular import at module load time.
        from mmdet.apis import multi_gpu_test
        tmpdir = osp.join(runner.work_dir, '.eval_hook')
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 evaluates and logs the gathered results.
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, results)
|
def auto_fp16(apply_to=None, out_fp32=False):
    """Decorator to enable fp16 training automatically.

    This decorator is useful when you write custom modules and want to
    support mixed precision training. If inputs arguments are fp32
    tensors, they will be converted to fp16 automatically. Arguments other
    than fp32 tensors are ignored.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp32 (bool): Whether to convert the output back to fp32.

    :Example:

        class MyModule1(nn.Module)

            # Convert x and y to fp16
            @auto_fp16()
            def forward(self, x, y):
                pass

        class MyModule2(nn.Module):

            # convert pred to fp16
            @auto_fp16(apply_to=('pred', ))
            def do_something(self, pred, others):
                pass
    """
    def auto_fp16_wrapper(old_func):
        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # Only valid on bound nn.Module methods: args[0] is `self`.
            if (not isinstance(args[0], torch.nn.Module)):
                raise TypeError('@auto_fp16 can only be used to decorate the method of nn.Module')
            # No-op unless the module opted in via `fp16_enabled = True`
            # (set by wrap_fp16_model elsewhere in this file).
            if (not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled)):
                return old_func(*args, **kwargs)
            # Map positional args to declared parameter names so they can
            # be matched against `apply_to`.
            args_info = getfullargspec(old_func)
            args_to_cast = (args_info.args if (apply_to is None) else apply_to)
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for (i, arg_name) in enumerate(arg_names):
                    if (arg_name in args_to_cast):
                        # fp32 tensors (possibly nested) -> fp16.
                        new_args.append(cast_tensor_type(args[i], torch.float, torch.half))
                    else:
                        new_args.append(args[i])
            new_kwargs = {}
            if kwargs:
                for (arg_name, arg_value) in kwargs.items():
                    if (arg_name in args_to_cast):
                        new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, torch.half)
                    else:
                        new_kwargs[arg_name] = arg_value
            output = old_func(*new_args, **new_kwargs)
            if out_fp32:
                # Cast any fp16 tensors in the output back to fp32.
                output = cast_tensor_type(output, torch.half, torch.float)
            return output
        return new_func
    return auto_fp16_wrapper
|
def force_fp32(apply_to=None, out_fp16=False):
    """Decorator to convert input arguments to fp32 in force.

    This decorator is useful when you write custom modules and want to
    support mixed precision training. If there are some inputs that must
    be processed in fp32 mode, then this decorator can handle it. If
    inputs arguments are fp16 tensors, they will be converted to fp32
    automatically. Arguments other than fp16 tensors are ignored.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp16 (bool): Whether to convert the output back to fp16.

    :Example:

        class MyModule1(nn.Module)

            # Convert x and y to fp32
            @force_fp32()
            def loss(self, x, y):
                pass

        class MyModule2(nn.Module):

            # convert pred to fp32
            @force_fp32(apply_to=('pred', ))
            def post_process(self, pred, others):
                pass
    """
    def force_fp32_wrapper(old_func):
        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # Only valid on bound nn.Module methods: args[0] is `self`.
            if (not isinstance(args[0], torch.nn.Module)):
                raise TypeError('@force_fp32 can only be used to decorate the method of nn.Module')
            # No-op unless the module opted in via `fp16_enabled = True`.
            if (not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled)):
                return old_func(*args, **kwargs)
            # Map positional args to declared parameter names so they can
            # be matched against `apply_to`.
            args_info = getfullargspec(old_func)
            args_to_cast = (args_info.args if (apply_to is None) else apply_to)
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for (i, arg_name) in enumerate(arg_names):
                    if (arg_name in args_to_cast):
                        # fp16 tensors (possibly nested) -> fp32.
                        new_args.append(cast_tensor_type(args[i], torch.half, torch.float))
                    else:
                        new_args.append(args[i])
            new_kwargs = dict()
            if kwargs:
                for (arg_name, arg_value) in kwargs.items():
                    if (arg_name in args_to_cast):
                        new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.half, torch.float)
                    else:
                        new_kwargs[arg_name] = arg_value
            output = old_func(*new_args, **new_kwargs)
            if out_fp16:
                # Cast any fp32 tensors in the output back to fp16.
                output = cast_tensor_type(output, torch.float, torch.half)
            return output
        return new_func
    return force_fp32_wrapper
|
class Fp16OptimizerHook(OptimizerHook):
    """FP16 optimizer hook.

    The steps of fp16 optimizer is as follows.
    1. Scale the loss value.
    2. BP in the fp16 model.
    3. Copy gradients from fp16 model to fp32 weights.
    4. Update fp32 weights.
    5. Copy updated parameters from fp32 weights to fp16 model.

    Refer to https://arxiv.org/abs/1710.03740 for more details.

    Args:
        loss_scale (float): Scale factor multiplied with loss.
    """

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=(- 1), loss_scale=512.0, distributed=True):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.loss_scale = loss_scale
        self.distributed = distributed

    def before_run(self, runner):
        # Deep-copy the param groups first so the optimizer keeps an fp32
        # master copy, then convert the model itself to fp16.
        runner.optimizer.param_groups = copy.deepcopy(runner.optimizer.param_groups)
        wrap_fp16_model(runner.model)

    def copy_grads_to_fp32(self, fp16_net, fp32_weights):
        """Copy gradients from fp16 model to fp32 weight copy."""
        for (fp32_param, fp16_param) in zip(fp32_weights, fp16_net.parameters()):
            if (fp16_param.grad is not None):
                if (fp32_param.grad is None):
                    # Lazily allocate the fp32 grad buffer on first use.
                    fp32_param.grad = fp32_param.data.new(fp32_param.size())
                fp32_param.grad.copy_(fp16_param.grad)

    def copy_params_to_fp16(self, fp16_net, fp32_weights):
        """Copy updated params from fp32 weight copy to fp16 model."""
        for (fp16_param, fp32_param) in zip(fp16_net.parameters(), fp32_weights):
            fp16_param.data.copy_(fp32_param.data)

    def after_train_iter(self, runner):
        # Clear both the fp16 model grads and the fp32 master grads.
        runner.model.zero_grad()
        runner.optimizer.zero_grad()
        # Scale the loss before backprop to keep fp16 grads representable.
        scaled_loss = (runner.outputs['loss'] * self.loss_scale)
        scaled_loss.backward()
        fp32_weights = []
        for param_group in runner.optimizer.param_groups:
            fp32_weights += param_group['params']
        self.copy_grads_to_fp32(runner.model, fp32_weights)
        if self.distributed:
            allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
        # Undo the loss scaling on the fp32 grads before the update step.
        for param in fp32_weights:
            if (param.grad is not None):
                param.grad.div_(self.loss_scale)
        if (self.grad_clip is not None):
            self.clip_grads(fp32_weights)
        runner.optimizer.step()
        self.copy_params_to_fp16(runner.model, fp32_weights)
|
def wrap_fp16_model(model):
    """Convert a model to fp16, keeping norm layers in fp32 and flagging
    fp16-aware modules via their ``fp16_enabled`` attribute.
    """
    model.half()
    # Norm layers are converted back to fp32 by patch_norm_fp32.
    patch_norm_fp32(model)
    for module in model.modules():
        if hasattr(module, 'fp16_enabled'):
            module.fp16_enabled = True
|
def patch_norm_fp32(module):
    """Recursively keep normalization layers in fp32 inside an fp16 model.

    BatchNorm/GroupNorm parameters are converted back to fp32; where the
    layer cannot natively mix fp16 inputs with fp32 params (GroupNorm, or
    BatchNorm on PyTorch < 1.3) its forward is patched to cast inputs to
    fp32 and outputs back to fp16.

    Args:
        module (nn.Module): model to patch, modified in place.

    Returns:
        nn.Module: the same module, for chaining.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
        # Bug fix: the original compared version strings lexically, so
        # '1.10' < '1.3' evaluated True and patched versions that do not
        # need it. Compare numeric (major, minor) instead; the '+' split
        # drops local build suffixes like '+cu113'.
        version = tuple(
            int(part) for part in torch.__version__.split('+')[0].split('.')[:2])
        if isinstance(module, nn.GroupNorm) or version < (1, 3):
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module
|
def patch_forward_method(func, src_type, dst_type, convert_output=True):
    """Patch the forward method of a module.

    Args:
        func (callable): The original forward method.
        src_type (torch.dtype): Type of input arguments to be converted from.
        dst_type (torch.dtype): Type of input arguments to be converted to.
        convert_output (bool): Whether to convert the output back to src_type.

    Returns:
        callable: The patched forward method.
    """
    def new_forward(*args, **kwargs):
        cast_args = cast_tensor_type(args, src_type, dst_type)
        cast_kwargs = cast_tensor_type(kwargs, src_type, dst_type)
        result = func(*cast_args, **cast_kwargs)
        if convert_output:
            result = cast_tensor_type(result, dst_type, src_type)
        return result
    return new_forward
|
def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively cast torch tensors in a nested container to ``dst_type``.

    Strings and numpy arrays pass through untouched; mappings and
    iterables are rebuilt with their elements converted; any other value
    is returned as-is.
    """
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    if isinstance(inputs, (str, np.ndarray)):
        return inputs
    if isinstance(inputs, abc.Mapping):
        converted = {
            key: cast_tensor_type(value, src_type, dst_type)
            for key, value in inputs.items()
        }
        return type(inputs)(converted)
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type, dst_type) for item in inputs)
    return inputs
|
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In dataset, all masks are concatenated into a single 1-D
    tensor. Here we need to split the tensor into original representations.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of
            list (length = poly num) of numpy array
    """
    mask_polys_list = []
    for polys_single, lens_single, per_mask_single in zip(
            polys, poly_lens, polys_per_mask):
        # First split the flat tensor into individual polys, then group
        # the polys back into per-mask lists.
        split_polys = mmcv.slice_list(polys_single, lens_single.tolist())
        mask_polys = mmcv.slice_list(split_polys, per_mask_single.tolist())
        mask_polys_list.append(mask_polys)
    return mask_polys_list
|
def build_optimizer(model, optimizer_cfg):
    """Build optimizer from configs.

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        optimizer_cfg (dict): The config dict of the optimizer.
            Positional fields are:
                - type: class name of the optimizer.
                - lr: base learning rate.
            Optional fields are:
                - any arguments of the corresponding optimizer type, e.g.,
                  weight_decay, momentum, etc.
                - paramwise_options: a dict with 4 accepted fields
                  (bias_lr_mult, bias_decay_mult, norm_decay_mult,
                  dwconv_decay_mult).
                  `bias_lr_mult` and `bias_decay_mult` will be multiplied to
                  the lr and weight decay respectively for all bias parameters
                  (except for normalization layers), and `norm_decay_mult`
                  will be multiplied to the weight decay for all weight and
                  bias parameters of normalization layers.
                  `dwconv_decay_mult` will be multiplied to the weight decay
                  for all weight and bias parameters of depthwise conv layers.

    Returns:
        torch.optim.Optimizer: The initialized optimizer.

    Example:
        >>> import torch
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001)
        >>> optimizer = build_optimizer(model, optimizer_cfg)
    """
    # Strip a DataParallel/DistributedDataParallel wrapper if present.
    if hasattr(model, 'module'):
        model = model.module
    # Work on a copy so the caller's config dict is not mutated by pop().
    optimizer_cfg = optimizer_cfg.copy()
    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
    if (paramwise_options is None):
        params = model.parameters()
    else:
        assert isinstance(paramwise_options, dict)
        base_lr = optimizer_cfg['lr']
        base_wd = optimizer_cfg.get('weight_decay', None)
        # Decay multipliers are meaningless without a base weight decay.
        if (('bias_decay_mult' in paramwise_options) or ('norm_decay_mult' in paramwise_options) or ('dwconv_decay_mult' in paramwise_options)):
            assert (base_wd is not None)
        bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.0)
        bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.0)
        norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.0)
        dwconv_decay_mult = paramwise_options.get('dwconv_decay_mult', 1.0)
        named_modules = dict(model.named_modules())
        params = []
        for (name, param) in model.named_parameters():
            # One param group per parameter so each can carry its own
            # lr/weight_decay overrides.
            param_group = {'params': [param]}
            if (not param.requires_grad):
                # Frozen params are still appended so ordering matches
                # model.named_parameters(), but get no special options.
                params.append(param_group)
                continue
            # Norm layers are matched by name ('bn'/'gn' in the param path).
            # NOTE(review): the '.' before (weight|bias) is an unescaped
            # regex wildcard; in practice it matches the literal dot.
            if re.search('(bn|gn)(\\d+)?.(weight|bias)', name):
                if (base_wd is not None):
                    param_group['weight_decay'] = (base_wd * norm_decay_mult)
            elif name.endswith('.bias'):
                param_group['lr'] = (base_lr * bias_lr_mult)
                if (base_wd is not None):
                    param_group['weight_decay'] = (base_wd * bias_decay_mult)
            # Depthwise convs (groups == in_channels) get their own decay
            # multiplier; this deliberately runs after the branches above
            # and may overwrite the weight_decay they set.
            module_name = name.replace('.weight', '').replace('.bias', '')
            if ((module_name in named_modules) and (base_wd is not None)):
                module = named_modules[module_name]
                if (isinstance(module, torch.nn.Conv2d) and (module.in_channels == module.groups)):
                    param_group['weight_decay'] = (base_wd * dwconv_decay_mult)
            params.append(param_group)
    optimizer_cfg['params'] = params
    return build_from_cfg(optimizer_cfg, OPTIMIZERS)
|
@OPTIMIZERS.register_module
class CopyOfSGD(SGD):
    """A clone of torch.optim.SGD.

    A customized optimizer could be defined like CopyOfSGD. You may derive
    from built-in optimizers in torch.optim, or directly implement a new
    optimizer.
    """
|
def register_torch_optimizers():
    """Register all optimizer classes found in ``torch.optim``.

    Returns:
        list[str]: Names of the optimizer classes that were registered.
    """
    registered = []
    for attr_name in dir(torch.optim):
        # Skip dunder attributes such as __name__/__doc__.
        if attr_name.startswith('__'):
            continue
        candidate = getattr(torch.optim, attr_name)
        if inspect.isclass(candidate) and issubclass(candidate,
                                                     torch.optim.Optimizer):
            OPTIMIZERS.register_module(candidate)
            registered.append(attr_name)
    return registered
|
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=(- 1)):
    """All-reduce tensors in flattened buckets and average by world size.

    Args:
        tensors (list[Tensor]): Tensors to reduce in place.
        world_size (int): Number of processes; results are divided by it.
        bucket_size_mb (int): Max bucket size in MB; non-positive means
            one bucket per tensor type.
    """
    if bucket_size_mb > 0:
        bytes_per_bucket = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bytes_per_bucket)
    else:
        # Group tensors by dtype/device type so each flattened bucket
        # is homogeneous.
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()
    for bucket in buckets:
        # Flatten -> single allreduce -> average -> scatter back in place.
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        unflattened = _unflatten_dense_tensors(flat, bucket)
        for original, reduced in zip(bucket, unflattened):
            original.copy_(reduced)
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)):
    """All-reduce and average the gradients of ``params`` across workers.

    Args:
        params (iterable): Model parameters; only those with a gradient
            and requires_grad are reduced.
        coalesce (bool): Whether to reduce gradients in flattened buckets.
        bucket_size_mb (int): Bucket size in MB when coalescing.
    """
    grads = [
        p.grad.data for p in params
        if p.requires_grad and p.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
        return
    # Non-coalesced path: one allreduce per gradient tensor.
    for grad in grads:
        dist.all_reduce(grad.div_(world_size))
|
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook used in distributed training.

    NOTE(review): this hook stores allreduce settings (coalesce,
    bucket_size_mb) but does not itself call allreduce_grads in
    after_train_iter — presumably gradient synchronization is handled by
    the DDP model wrapper; confirm against the trainer setup.
    """

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=(- 1)):
        # grad_clip (dict | None): kwargs for clip_grads; None disables clipping.
        self.grad_clip = grad_clip
        # Gradient-allreduce configuration (kept for consistency with
        # allreduce_grads; unused within this class body).
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb

    def after_train_iter(self, runner):
        # Standard step: zero grads, backprop the loss, optionally clip,
        # then apply the optimizer update.
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        if (self.grad_clip is not None):
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
|
def _concat_dataset(cfg, default_args=None):
    """Build one dataset per annotation file and concatenate them.

    ``ann_file`` must be a sequence; ``img_prefix``/``seg_prefix``/
    ``proposal_file`` may each be a matching sequence or a single shared
    value.
    """
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    datasets = []
    for i, ann_file in enumerate(ann_files):
        # Deep-copy so per-dataset overrides never leak into the shared cfg.
        data_cfg = copy.deepcopy(cfg)
        data_cfg['ann_file'] = ann_file
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[i]
        if isinstance(seg_prefixes, (list, tuple)):
            data_cfg['seg_prefix'] = seg_prefixes[i]
        if isinstance(proposal_files, (list, tuple)):
            data_cfg['proposal_file'] = proposal_files[i]
        datasets.append(build_dataset(data_cfg, default_args))
    return ConcatDataset(datasets)
|
def build_dataset(cfg, default_args=None):
    """Build a dataset from config.

    Handles lists of configs (concatenated), the RepeatDataset wrapper,
    multi-annotation-file configs (concatenated), and plain registry
    configs, in that order.
    """
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset(
            [build_dataset(sub_cfg, default_args) for sub_cfg in cfg])
    if cfg['type'] == 'RepeatDataset':
        inner = build_dataset(cfg['dataset'], default_args)
        return RepeatDataset(inner, cfg['times'])
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        # Multiple annotation files -> concat of per-file datasets.
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
|
def build_dataloader(dataset, imgs_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        imgs_per_gpu (int): Number of images on each GPU, i.e., batch size
            of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed for deterministic per-worker seeding.
        kwargs: any keyword argument to be used to initialize DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # Distributed: each process loads its own shard of the dataset.
        if shuffle:
            sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(dataset, world_size, rank,
                                         shuffle=False)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        # Single dataloader covers all GPUs.
        sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu
    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None
    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        worker_init_fn=init_fn,
        **kwargs)
|
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed numpy and the stdlib RNG deterministically per dataloader worker.

    The seed combines the base seed with the worker id and the rank so that
    every worker on every process draws a distinct, reproducible stream.
    """
    worker_seed = seed + worker_id + rank * num_workers
    np.random.seed(worker_seed)
    random.seed(worker_seed)
|
@DATASETS.register_module
class CityscapesDataset(CocoDataset):
    """Cityscapes instance-segmentation dataset loaded via the COCO API.

    Inherits COCO-style annotation loading from :class:`CocoDataset` and
    overrides image filtering, annotation parsing, result formatting
    (txt + instance-mask pngs) and evaluation via ``cityscapesscripts``.
    """

    # The 8 Cityscapes "thing" (instance) categories.
    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # Image ids that have at least one annotation of any kind.
        ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values()))
        for (i, img_info) in enumerate(self.img_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            # Images whose annotations are all crowd regions carry no
            # trainable instances, so they are filtered with the empty ones.
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            if (self.filter_empty_gt and ((self.img_ids[i] not in ids_with_ann) or all_iscrowd)):
                continue
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict with keys: bboxes, bboxes_ignore, labels, masks,
                seg_map. NOTE(review): the original docstring claimed
                "masks" are decoded binary masks, but the code appends the
                raw ``ann['segmentation']`` entries unmodified — decoding
                presumably happens downstream; confirm against the pipeline.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Drop degenerate boxes (non-positive area or sub-pixel extent).
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            # Convert COCO (x, y, w, h) to inclusive (x1, y1, x2, y2).
            bbox = [x1, y1, ((x1 + w) - 1), ((y1 + h) - 1)]
            if ann.get('iscrowd', False):
                # Crowd regions only suppress proposals; not used as GT.
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=img_info['segm_file'])
        return ann

    def results2txt(self, results, outfile_prefix):
        """Dump the detection results to txt files (Cityscapes format).

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the txt files.
                If the prefix is "somepath/xxx", the txt files will be
                named "somepath/xxx.txt".

        Returns:
            list[str]: Paths of the written per-image prediction txt files;
                each references instance-mask png files written alongside.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            # NOTE(review): the pip package name in this message is
            # misspelled ("citscapesscripts"); left untouched because it is
            # runtime output.
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, (basename + '_pred.txt'))
            (bbox_result, segm_result) = result
            bboxes = np.vstack(bbox_result)
            segms = mmcv.concat_list(segm_result)
            # Per-class result lists -> flat label array aligned with bboxes.
            labels = [np.full(bbox.shape[0], i, dtype=np.int32) for (i, bbox) in enumerate(bbox_result)]
            labels = np.concatenate(labels)
            assert (len(bboxes) == len(segms) == len(labels))
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    # Map our class name to the official Cityscapes label id.
                    class_id = CSLabels.name2label[classes].id
                    # Last bbox column holds the detection score.
                    score = bboxes[(i, (- 1))]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix, (basename + '_{}_{}.png'.format(i, classes)))
                    mmcv.imwrite(mask, png_filename)
                    fout.write('{} {} {}\n'.format(osp.basename(png_filename), class_id, score))
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format the results to txt (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of txt files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be
                created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict
                containing the json filepaths, tmp_dir is the temporal
                directory created for saving txt/png files when
                txtfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        # NOTE(review): the two asserts below duplicate the two above;
        # harmless but redundant.
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        if (txtfile_prefix is None):
            # No prefix given: write into a temp dir the caller must clean up.
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='bbox', logger=None, outfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. 'cityscapes'
                is handled here; any remaining metrics are delegated to
                COCO-style evaluation.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            outfile_prefix (str | None): Prefix for dumped result files.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal numbers used for
                evaluating recalls. Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds for evaluation.

        Returns:
            dict[str, float]: Evaluation metrics.
        """
        eval_results = dict()
        # Copy so removing 'cityscapes' does not mutate the caller's list.
        metrics = (metric.copy() if isinstance(metric, list) else [metric])
        if ('cityscapes' in metrics):
            eval_results.update(self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        if (len(metrics) > 0):
            # Delegate remaining metrics to a plain CocoDataset built from
            # the same configuration.
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, self.data_root, self.img_prefix, self.seg_prefix, self.proposal_file, self.test_mode, self.filter_empty_gt)
            eval_results.update(self_coco.evaluate(results, metrics, logger, outfile_prefix, classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Run the official cityscapesscripts instance-level evaluation."""
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            # NOTE(review): package name misspelled in the message; kept
            # as-is since it is runtime output.
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if (logger is None):
            msg = ('\n' + msg)
        print_log(msg, logger=logger)
        (result_files, tmp_dir) = self.format_results(results, txtfile_prefix)
        if (tmp_dir is None):
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')
        eval_results = {}
        print_log('Evaluating results under {} ...'.format(result_dir), logger=logger)
        # Configure the cityscapesscripts evaluator through its module-level
        # args object (that is the API it exposes).
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir, 'gtInstances.json')
        # GT instance-id maps are found by rewriting the image prefix.
        CSEval.args.groundTruthSearch = os.path.join(self.img_prefix.replace('leftImg8bit', 'gtFine'), '*/*_gtFine_instanceIds.png')
        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), 'Cannot find ground truth images in {}.'.format(CSEval.args.groundTruthSearch)
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages']
        eval_results['mAP'] = CSEval_results['allAp']
        eval_results['AP@50'] = CSEval_results['allAp50%']
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
|
@DATASETS.register_module
class CocoDataset(CustomDataset):
    """COCO-format detection dataset.

    Loads annotations through the pycocotools ``COCO`` API and implements
    json result dumping plus COCO-style (``COCOeval``) evaluation.
    """

    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')

    def load_annotations(self, ann_file):
        """Load image infos from a COCO-style annotation file.

        Also builds ``cat_ids``/``cat2label`` (category id -> 1-based
        label) and ``img_ids`` as side effects on ``self``.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        # Labels are 1-based; 0 is reserved (background, by convention of
        # the enumerate(+1) below).
        self.cat2label = {cat_id: (i + 1) for (i, cat_id) in enumerate(self.cat_ids)}
        self.img_ids = self.coco.getImgIds()
        img_infos = []
        for i in self.img_ids:
            info = self.coco.loadImgs([i])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos

    def get_ann_info(self, idx):
        """Return the parsed annotation dict for the image at ``idx``."""
        img_id = self.img_infos[idx]['id']
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        ann_info = self.coco.loadAnns(ann_ids)
        return self._parse_ann_info(self.img_infos[idx], ann_info)

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # Image ids that have at least one annotation.
        ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values()))
        for (i, img_info) in enumerate(self.img_infos):
            if (self.filter_empty_gt and (self.img_ids[i] not in ids_with_ann)):
                continue
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Info of the image the annotations belong to.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes,
                bboxes_ignore, labels, masks, seg_map. "masks" are raw
                annotations and not decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Drop degenerate boxes (non-positive area or sub-pixel extent).
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            # Convert COCO (x, y, w, h) to inclusive (x1, y1, x2, y2).
            bbox = [x1, y1, ((x1 + w) - 1), ((y1 + h) - 1)]
            if ann.get('iscrowd', False):
                # Crowd regions only suppress proposals; not used as GT.
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # Segmentation map path is derived from the image filename.
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
        return ann

    def xyxy2xywh(self, bbox):
        """Convert an inclusive (x1, y1, x2, y2) box to COCO (x, y, w, h)."""
        _bbox = bbox.tolist()
        return [_bbox[0], _bbox[1], ((_bbox[2] - _bbox[0]) + 1), ((_bbox[3] - _bbox[1]) + 1)]

    def _proposal2json(self, results):
        """Convert proposal results (per-image ndarray) to COCO json dicts."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                # Proposals are class-agnostic; category id 1 is a placeholder.
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results (per-image, per-class) to json dicts."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert (det, seg) results to bbox json dicts and segm json dicts."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            (det, seg) = results[idx]
            for label in range(len(det)):
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)
                # Some models return (segms, mask_scores) tuples; otherwise
                # the bbox score is reused as the mask score.
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    # RLE counts may be bytes; json needs str.
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return (bbox_json_results, segm_json_results)

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If
                the prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str, str]: Possible keys are "bbox", "segm", "proposal",
                and values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
            # 'proposal' deliberately points at the same bbox json file.
            result_files['proposal'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
            result_files['proposal'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
            result_files['segm'] = '{}.{}.json'.format(outfile_prefix, 'segm')
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = '{}.{}.json'.format(outfile_prefix, 'proposal')
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Evaluate proposal recall directly, without going through json.

        Returns:
            ndarray: Average recall per proposal number (mean over IoU
                thresholds).
        """
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.getAnnIds(imgIds=self.img_ids[i])
            ann_info = self.coco.loadAnns(ann_ids)
            if (len(ann_info) == 0):
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                # Ignored and crowd annotations are excluded from recall GT.
                if (ann.get('ignore', False) or ann['iscrowd']):
                    continue
                (x1, y1, w, h) = ann['bbox']
                bboxes.append([x1, y1, ((x1 + w) - 1), ((y1 + h) - 1)])
            bboxes = np.array(bboxes, dtype=np.float32)
            if (bboxes.shape[0] == 0):
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)
        recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list): Testing results of the dataset.
            jsonfile_prefix (str | None): The prefix of json files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be
                created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict
                containing the json filepaths, tmp_dir is the temporal
                directory created for saving json files when
                jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        if (jsonfile_prefix is None):
            # No prefix given: dump into a temp dir the caller must clean up.
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated; any of
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files,
                e.g., "a/b/prefix". If not specified, a temp file will be
                created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
                NOTE(review): currently a no-op (see the `pass` below).
            proposal_nums (Sequence[int]): Proposal numbers used for
                evaluating recalls. Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds for evaluation.

        Returns:
            dict[str, float]: Evaluation metrics.
        """
        metrics = (metric if isinstance(metric, list) else [metric])
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if (metric not in allowed_metrics):
                raise KeyError('metric {} is not supported'.format(metric))
        (result_files, tmp_dir) = self.format_results(results, jsonfile_prefix)
        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if (logger is None):
                msg = ('\n' + msg)
            print_log(msg, logger=logger)
            if (metric == 'proposal_fast'):
                # Fast recall path bypasses COCOeval entirely.
                ar = self.fast_eval_recall(results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for (i, num) in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if (metric not in result_files):
                raise KeyError('{} is not in results'.format(metric))
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                # loadRes raises IndexError on an empty result file.
                print_log('The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR)
                break
            # 'proposal' is evaluated as class-agnostic bbox.
            iou_type = ('bbox' if (metric == 'proposal') else metric)
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.imgIds = self.img_ids
            if (metric == 'proposal'):
                cocoEval.params.useCats = 0
                cocoEval.params.maxDets = list(proposal_nums)
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                # stats[6:] hold the AR entries in COCOeval's summary order.
                metric_items = ['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000']
                for (i, item) in enumerate(metric_items):
                    val = float('{:.3f}'.format(cocoEval.stats[(i + 6)]))
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:
                    # NOTE(review): per-class AP reporting not implemented.
                    pass
                # stats[:6] hold mAP entries in COCOeval's summary order.
                metric_items = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']
                for i in range(len(metric_items)):
                    key = '{}_{}'.format(metric, metric_items[i])
                    val = float('{:.3f}'.format(cocoEval.stats[i]))
                    eval_results[key] = val
                eval_results['{}_mAP_copypaste'.format(metric)] = '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} {ap[4]:.3f} {ap[5]:.3f}'.format(ap=cocoEval.stats[:6])
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
|
@DATASETS.register_module
class CustomDataset(Dataset):
"Custom dataset for detection.\n\n Annotation format:\n [\n {\n 'filename': 'a.jpg',\n 'width': 1280,\n 'height': 720,\n 'ann': {\n 'bboxes': <np.ndarray> (n, 4),\n 'labels': <np.ndarray> (n, ),\n 'bboxes_ignore': <np.ndarray> (k, 4), (optional field)\n 'labels_ignore': <np.ndarray> (k, 4) (optional field)\n }\n },\n ...\n ]\n\n The `ann` field is optional for testing.\n "
CLASSES = None
def __init__(self, ann_file, pipeline, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False, filter_empty_gt=True):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
if (self.data_root is not None):
if (not osp.isabs(self.ann_file)):
self.ann_file = osp.join(self.data_root, self.ann_file)
if (not ((self.img_prefix is None) or osp.isabs(self.img_prefix))):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if (not ((self.seg_prefix is None) or osp.isabs(self.seg_prefix))):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if (not ((self.proposal_file is None) or osp.isabs(self.proposal_file))):
self.proposal_file = osp.join(self.data_root, self.proposal_file)
self.img_infos = self.load_annotations(self.ann_file)
if (self.proposal_file is not None):
self.proposals = self.load_proposals(self.proposal_file)
else:
self.proposals = None
if (not test_mode):
valid_inds = self._filter_imgs()
self.img_infos = [self.img_infos[i] for i in valid_inds]
if (self.proposals is not None):
self.proposals = [self.proposals[i] for i in valid_inds]
if (not self.test_mode):
self._set_group_flag()
self.pipeline = Compose(pipeline)
def __len__(self):
return len(self.img_infos)
def load_annotations(self, ann_file):
return mmcv.load(ann_file)
def load_proposals(self, proposal_file):
return mmcv.load(proposal_file)
def get_ann_info(self, idx):
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
'Filter images too small.'
valid_inds = []
for (i, img_info) in enumerate(self.img_infos):
if (min(img_info['width'], img_info['height']) >= min_size):
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
'Set flag according to image aspect ratio.\n\n Images with aspect ratio greater than 1 will be set as group 1,\n otherwise group 0.\n '
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.img_infos[i]
if ((img_info['width'] / img_info['height']) > 1):
self.flag[i] = 1
def _rand_another(self, idx):
pool = np.where((self.flag == self.flag[idx]))[0]
return np.random.choice(pool)
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if (data is None):
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if (self.proposals is not None):
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
if (self.proposals is not None):
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
    def format_results(self, results, **kwargs):
        """Format *results* for external evaluation.

        Intentionally a no-op here; presumably subclasses with
        format-specific output (e.g. COCO json) override it — TODO confirm.
        """
        pass
def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
'Evaluate the dataset.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. Default: None.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thr (float | list[float]): IoU threshold. It must be a float\n when evaluating mAP, and can be a list when evaluating recall.\n Default: 0.5.\n scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.\n Default: None.\n '
if (not isinstance(metric, str)):
assert (len(metric) == 1)
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if (metric not in allowed_metrics):
raise KeyError('metric {} is not supported'.format(metric))
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = {}
if (metric == 'mAP'):
assert isinstance(iou_thr, float)
(mean_ap, _) = eval_map(results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger)
eval_results['mAP'] = mean_ap
elif (metric == 'recall'):
gt_bboxes = [ann['bboxes'] for ann in annotations]
if isinstance(iou_thr, float):
iou_thr = [iou_thr]
recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for (i, num) in enumerate(proposal_nums):
for (j, iou) in enumerate(iou_thr):
eval_results['recall@{}@{}'.format(num, iou)] = recalls[(i, j)]
if (recalls.shape[1] > 1):
ar = recalls.mean(axis=1)
for (i, num) in enumerate(proposal_nums):
eval_results['AR@{}'.format(num)] = ar[i]
return eval_results
|
@DATASETS.register_module
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but also
    concatenates the aspect-ratio group flag of the member datasets.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        # Only the first dataset is probed for 'flag' (mirrors upstream
        # behavior); all members are assumed to agree.
        if hasattr(datasets[0], 'flag'):
            self.flag = np.concatenate([ds.flag for ds in datasets])
|
@DATASETS.register_module
class RepeatDataset(object):
    """A wrapper that virtually repeats a dataset.

    The repeated dataset is `times` larger than the original. Useful when
    data loading is slow but the dataset is small: it cuts the loading
    overhead between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        if hasattr(dataset, 'flag'):
            # Repeat the aspect-ratio flags to match the virtual length.
            self.flag = np.tile(dataset.flag, times)
        self._ori_len = len(dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        return self._ori_len * self.times
|
@PIPELINES.register_module
class Compose(object):
    """Chain a sequence of transforms into a single callable.

    Each entry may be a config dict (built via the PIPELINES registry) or
    any callable. A transform returning ``None`` aborts the chain.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for transform in transforms:
            if isinstance(transform, dict):
                self.transforms.append(build_from_cfg(transform, PIPELINES))
            elif callable(transform):
                self.transforms.append(transform)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        body = ''.join('\n' + '    {0}'.format(t) for t in self.transforms)
        return self.__class__.__name__ + '(' + body + '\n)'
|
def to_tensor(data):
    """Convert *data* to a :obj:`torch.Tensor`.

    Supported types: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    non-string :class:`Sequence`, :class:`int` and :class:`float`.

    Raises:
        TypeError: If *data* is of any other type.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError('type {} cannot be converted to tensor.'.format(type(data)))
|
@PIPELINES.register_module
class ToTensor(object):
    """Convert the values stored under *keys* to tensors via :func:`to_tensor`."""

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        for k in self.keys:
            results[k] = to_tensor(results[k])
        return results

    def __repr__(self):
        return self.__class__.__name__ + '(keys={})'.format(self.keys)
|
@PIPELINES.register_module
class ImageToTensor(object):
    """Transpose HWC image arrays under *keys* to CHW and convert to tensors.

    Grayscale (2-D) inputs get a trailing channel axis added first.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        for key in self.keys:
            img = results[key]
            if img.ndim < 3:
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return self.__class__.__name__ + '(keys={})'.format(self.keys)
|
@PIPELINES.register_module
class Transpose(object):
    """Transpose array values stored under *keys* using the axis *order*."""

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        for key in self.keys:
            value = results[key]
            results[key] = value.transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + '(keys={}, order={})'.format(self.keys, self.order)
|
@PIPELINES.register_module
class ToDataContainer(object):
    """Wrap selected result values into :obj:`DataContainer` objects.

    Each entry of *fields* is a dict with a ``'key'`` naming the results
    entry; the remaining items are forwarded as DataContainer kwargs.
    """

    def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        for field in self.fields:
            # Copy so popping 'key' never mutates the configured fields.
            kwargs = field.copy()
            key = kwargs.pop('key')
            results[key] = DC(results[key], **kwargs)
        return results

    def __repr__(self):
        return self.__class__.__name__ + '(fields={})'.format(self.fields)
|
@PIPELINES.register_module
class DefaultFormatBundle(object):
    """Default formatting bundle.

    Simplifies the formatting of common fields:

    - img: (1) transpose to CHW, (2) to tensor, (3) to DataContainer (stack=True)
    - proposals / gt_bboxes / gt_bboxes_ignore / gt_labels:
      (1) to tensor, (2) to DataContainer
    - gt_masks: to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1) add dim 0, (2) to tensor,
      (3) to DataContainer (stack=True)
    """

    def __call__(self, results):
        if 'img' in results:
            img = results['img']
            if img.ndim < 3:
                # Grayscale: give it an explicit channel axis.
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ('proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels'):
            if key in results:
                results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            seg = results['gt_semantic_seg'][None, ...]
            results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
|
@PIPELINES.register_module
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    Usually the last pipeline stage. The values under *keys* are copied
    into the output; the values under *meta_keys* are gathered into an
    ``'img_metas'`` DataContainer (cpu_only). Default meta keys cover
    filename, original/input/padded shapes, scale factor, flip flag and
    normalization config.
    """

    def __init__(self, keys, meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        img_meta = {key: results[key] for key in self.meta_keys}
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return self.__class__.__name__ + '(keys={}, meta_keys={})'.format(self.keys, self.meta_keys)
|
@PIPELINES.register_module
class WrapFieldsToLists(object):
    """Wrap every field of the results dict into a one-element list.

    Useful as the last step of a test/validation pipeline for
    single-image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>    dict(type='LoadImageFromFile'),
        >>>    dict(type='Normalize',
                    mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True),
        >>>    dict(type='Pad', size_divisor=32),
        >>>    dict(type='ImageToTensor', keys=['img']),
        >>>    dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapIntoLists')
        >>> ]
    """

    def __call__(self, results):
        # Mutate in place so the same dict object flows onward.
        for key in list(results):
            results[key] = [results[key]]
        return results

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
|
@PIPELINES.register_module
class InstaBoost(object):
    """Data augmentation from "InstaBoost: Boosting Instance Segmentation
    Via Probability Map Guided Copy-Pasting".

    Requires the third-party ``instaboostfast`` package; implementation
    details: https://github.com/GothicAi/Instaboost.
    """
    def __init__(self, action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, dy=15, theta=((- 1), 1), color_prob=0.5, hflag=False, aug_ratio=0.5):
        # Import lazily so the dependency is only needed when configured.
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first for instaboost augmentation.')
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, scale, dx, dy, theta, color_prob, hflag)
        # Probability that a given sample is augmented at all.
        self.aug_ratio = aug_ratio
    def _load_anns(self, results):
        # Convert the pipeline's ann_info into COCO-style ann dicts with
        # [x, y, w, h] boxes (inclusive pixel convention, hence the +1).
        labels = results['ann_info']['labels']
        masks = results['ann_info']['masks']
        bboxes = results['ann_info']['bboxes']
        n = len(labels)
        anns = []
        for i in range(n):
            label = labels[i]
            bbox = bboxes[i]
            mask = masks[i]
            (x1, y1, x2, y2) = bbox
            bbox = [x1, y1, ((x2 - x1) + 1), ((y2 - y1) + 1)]
            anns.append({'category_id': label, 'segmentation': mask, 'bbox': bbox})
        return anns
    def _parse_anns(self, results, anns, img):
        # Inverse of _load_anns: back to [x1, y1, x2, y2] boxes and write
        # the (possibly augmented) annotations/image into results.
        gt_bboxes = []
        gt_labels = []
        gt_masks_ann = []
        for ann in anns:
            (x1, y1, w, h) = ann['bbox']
            bbox = [x1, y1, ((x1 + w) - 1), ((y1 + h) - 1)]
            gt_bboxes.append(bbox)
            gt_labels.append(ann['category_id'])
            gt_masks_ann.append(ann['segmentation'])
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
        results['ann_info']['labels'] = gt_labels
        results['ann_info']['bboxes'] = gt_bboxes
        results['ann_info']['masks'] = gt_masks_ann
        results['img'] = img
        return results
    def __call__(self, results):
        img = results['img']
        anns = self._load_anns(results)
        # Apply InstaBoost with probability aug_ratio; otherwise pass through.
        if np.random.choice([0, 1], p=[(1 - self.aug_ratio), self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first.')
            (anns, img) = instaboost.get_new_data(anns, img, self.cfg, background=None)
        results = self._parse_anns(results, anns, img)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(cfg={}, aug_ratio={})'.format(self.cfg, self.aug_ratio)
        return repr_str
|
@PIPELINES.register_module
class MultiScaleFlipAug(object):
    """Run *transforms* once per (scale, flip) combination and collate.

    The returned dict maps each result key to a list with one entry per
    augmentation branch.
    """

    def __init__(self, transforms, img_scale, flip=False):
        self.transforms = Compose(transforms)
        self.img_scale = img_scale if isinstance(img_scale, list) else [img_scale]
        assert mmcv.is_list_of(self.img_scale, tuple)
        self.flip = flip

    def __call__(self, results):
        flip_options = [False, True] if self.flip else [False]
        aug_data = []
        for scale in self.img_scale:
            for flip in flip_options:
                branch = results.copy()
                branch['scale'] = scale
                branch['flip'] = flip
                aug_data.append(self.transforms(branch))
        # Collate: one list per key, ordered like the branches above.
        collated = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                collated[key].append(val)
        return collated

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(transforms={}, img_scale={}, flip={})'.format(self.transforms, self.img_scale, self.flip)
        return repr_str
|
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with an optional *shuffle* switch.

    Shuffling is seeded by the current epoch so all replicas agree on
    the permutation.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        n = len(self.dataset)
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(n, generator=g).tolist()
        else:
            indices = list(range(n))
        # Pad so every replica draws exactly num_samples indices, then
        # take this rank's interleaved share.
        indices += indices[:self.total_size - len(indices)]
        assert len(indices) == self.total_size
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
|
class GroupSampler(Sampler):
    """Sampler that batches indices from a single aspect-ratio group.

    Each group (dataset.flag value) is shuffled, padded to a multiple of
    *samples_per_gpu*, split into GPU-sized chunks, and the chunks are
    shuffled so every emitted batch is group-homogeneous.
    """
    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        # group_sizes[g] = number of images whose flag equals g.
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        # Each group is rounded up to a whole number of GPU batches.
        for (i, size) in enumerate(self.group_sizes):
            self.num_samples += (int(np.ceil((size / self.samples_per_gpu))) * self.samples_per_gpu)
    def __iter__(self):
        indices = []
        for (i, size) in enumerate(self.group_sizes):
            if (size == 0):
                continue
            indice = np.where((self.flag == i))[0]
            assert (len(indice) == size)
            np.random.shuffle(indice)
            # Pad with random repeats so the group fills its last batch.
            num_extra = ((int(np.ceil((size / self.samples_per_gpu))) * self.samples_per_gpu) - len(indice))
            indice = np.concatenate([indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        # Shuffle whole batches, keeping each batch within one group.
        indices = [indices[(i * self.samples_per_gpu):((i + 1) * self.samples_per_gpu)] for i in np.random.permutation(range((len(indices) // self.samples_per_gpu)))]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert (len(indices) == self.num_samples)
        return iter(indices)
    def __len__(self):
        return self.num_samples
|
class DistributedGroupSampler(Sampler):
    """Group-aware sampler restricted to one replica of a distributed job.

    Like :class:`GroupSampler`, batches are homogeneous in aspect-ratio
    group; additionally the (epoch-seeded, hence replica-consistent)
    index sequence is partitioned so each process loads an exclusive
    subset. Dataset size is assumed constant.

    Arguments:
        dataset: Dataset used for sampling.
        samples_per_gpu: Batch size on each replica.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """
    def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None):
        (_rank, _num_replicas) = get_dist_info()
        if (num_replicas is None):
            num_replicas = _num_replicas
        if (rank is None):
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        # Round each group up to a whole number of (batch * replica) blocks.
        for (i, j) in enumerate(self.group_sizes):
            self.num_samples += (int(math.ceil((((self.group_sizes[i] * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu)
        self.total_size = (self.num_samples * self.num_replicas)
    def __iter__(self):
        # Epoch-seeded generator: all replicas produce the same sequence.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = []
        for (i, size) in enumerate(self.group_sizes):
            if (size > 0):
                indice = np.where((self.flag == i))[0]
                assert (len(indice) == size)
                indice = indice[list(torch.randperm(int(size), generator=g))].tolist()
                # Pad by cycling the shuffled group until it fills its blocks.
                extra = (((int(math.ceil((((size * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu) * self.num_replicas) - len(indice))
                tmp = indice.copy()
                for _ in range((extra // size)):
                    indice.extend(tmp)
                indice.extend(tmp[:(extra % size)])
                indices.extend(indice)
        assert (len(indices) == self.total_size)
        # Shuffle whole GPU batches, keeping batches group-homogeneous.
        indices = [indices[j] for i in list(torch.randperm((len(indices) // self.samples_per_gpu), generator=g)) for j in range((i * self.samples_per_gpu), ((i + 1) * self.samples_per_gpu))]
        # Contiguous slice for this replica.
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)
    def __len__(self):
        return self.num_samples
    def set_epoch(self, epoch):
        # Called by the runner each epoch to reseed the shuffle.
        self.epoch = epoch
|
@DATASETS.register_module
class VOCDataset(XMLDataset):
    """Pascal VOC detection dataset (VOC2007/VOC2012 XML annotations)."""

    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # The VOC year selects the AP convention (voc07 = 11-point metric).
        if 'VOC2007' in self.img_prefix:
            self.year = 2007
        elif 'VOC2012' in self.img_prefix:
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate detection results.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): 'mAP' or 'recall' (a single-element
                list is also accepted).
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal numbers used for
                evaluating recalls. Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold; must be a float
                when evaluating mAP. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for mAP.

        Returns:
            dict: metric name -> value.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            if self.year == 2007:
                ds_name = 'voc07'
            else:
                # BUG FIX: was `self.dataset.CLASSES` — VOCDataset has no
                # `dataset` attribute, so mAP evaluation on VOC2012 raised
                # AttributeError. Use this dataset's own class names.
                ds_name = self.CLASSES
            # BUG FIX: forward the `scale_ranges` argument instead of
            # hard-coding None (consistent with CustomDataset.evaluate).
            mean_ap, _ = eval_map(results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=ds_name, logger=logger)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results['recall@{}@{}'.format(num, iou)] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
        return eval_results
|
@DATASETS.register_module
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """
    CLASSES = ('face',)

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Parse the per-image XML headers for every id listed in *ann_file*."""
        img_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
            root = ET.parse(xml_path).getroot()
            size = root.find('size')
            # WIDER images live in per-event folders recorded in the XML.
            folder = root.find('folder').text
            img_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(folder, '{}.jpg'.format(img_id)),
                    width=int(size.find('width').text),
                    height=int(size.find('height').text),
                ))
        return img_infos
|
@DATASETS.register_module
class XMLDataset(CustomDataset):
    """Dataset backed by per-image PASCAL VOC style XML annotation files."""
    def __init__(self, min_size=None, **kwargs):
        super(XMLDataset, self).__init__(**kwargs)
        # Label 0 is reserved for background, hence the +1 offset.
        self.cat2label = {cat: (i + 1) for (i, cat) in enumerate(self.CLASSES)}
        # Boxes with either side below min_size are moved to the ignore set
        # (training mode only).
        self.min_size = min_size
    def load_annotations(self, ann_file):
        """Build img_infos (id, filename, width, height) from the ids in *ann_file*."""
        img_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = 'JPEGImages/{}.jpg'.format(img_id)
            xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            img_infos.append(dict(id=img_id, filename=filename, width=width, height=height))
        return img_infos
    def get_ann_info(self, idx):
        """Parse the XML for image *idx* into bboxes/labels plus ignore sets.

        Objects flagged ``difficult`` or smaller than ``min_size`` go to
        ``bboxes_ignore``/``labels_ignore``; the rest to ``bboxes``/``labels``.
        """
        img_id = self.img_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('name').text
            label = self.cat2label[name]
            difficult = int(obj.find('difficult').text)
            bnd_box = obj.find('bndbox')
            bbox = [int(float(bnd_box.find('xmin').text)), int(float(bnd_box.find('ymin').text)), int(float(bnd_box.find('xmax').text)), int(float(bnd_box.find('ymax').text))]
            ignore = False
            if self.min_size:
                assert (not self.test_mode)
                w = (bbox[2] - bbox[0])
                h = (bbox[3] - bbox[1])
                if ((w < self.min_size) or (h < self.min_size)):
                    ignore = True
            if (difficult or ignore):
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if (not bboxes):
            bboxes = np.zeros((0, 4))
            labels = np.zeros((0,))
        else:
            # VOC coordinates are 1-based; shift to 0-based pixel coords.
            bboxes = (np.array(bboxes, ndmin=2) - 1)
            labels = np.array(labels)
        if (not bboxes_ignore):
            bboxes_ignore = np.zeros((0, 4))
            labels_ignore = np.zeros((0,))
        else:
            bboxes_ignore = (np.array(bboxes_ignore, ndmin=2) - 1)
            labels_ignore = np.array(labels_ignore)
        ann = dict(bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64))
        return ann
|
@HEADS.register_module
class GARetinaHead(GuidedAnchorHead):
    """Guided-Anchor-based RetinaNet head.

    On top of the usual cls/reg conv towers it predicts anchor location
    and shape, adapts features via deformable convs driven by the shape
    prediction, and masks the final convs by the location prediction at
    inference time.
    """
    def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, **kwargs):
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs)
    def _init_layers(self):
        """Build conv towers, anchor location/shape branches, feature adaption and masked heads."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            # First conv adapts in_channels; the rest keep feat_channels.
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            self.cls_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            self.reg_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        # Anchor location (1 channel) and shape (2 per anchor) predictors.
        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.feat_channels, (self.num_anchors * 2), 1)
        self.feature_adaption_cls = FeatureAdaption(self.feat_channels, self.feat_channels, kernel_size=3, deformable_groups=self.deformable_groups)
        self.feature_adaption_reg = FeatureAdaption(self.feat_channels, self.feat_channels, kernel_size=3, deformable_groups=self.deformable_groups)
        self.retina_cls = MaskedConv2d(self.feat_channels, (self.num_anchors * self.cls_out_channels), 3, padding=1)
        self.retina_reg = MaskedConv2d(self.feat_channels, (self.num_anchors * 4), 3, padding=1)
    def init_weights(self):
        """Normal-init all convs; classification biases use the focal-loss prior."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        self.feature_adaption_cls.init_weights()
        self.feature_adaption_reg.init_weights()
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_loc, std=0.01, bias=bias_cls)
        normal_init(self.conv_shape, std=0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)
    def forward_single(self, x):
        """Forward one FPN level; returns (cls_score, bbox_pred, shape_pred, loc_pred)."""
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        loc_pred = self.conv_loc(cls_feat)
        shape_pred = self.conv_shape(reg_feat)
        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
        # At inference, only compute outputs where loc confidence is high.
        if (not self.training):
            mask = (loc_pred.sigmoid()[0] >= self.loc_filter_thr)
        else:
            mask = None
        cls_score = self.retina_cls(cls_feat, mask)
        bbox_pred = self.retina_reg(reg_feat, mask)
        return (cls_score, bbox_pred, shape_pred, loc_pred)
|
@HEADS.register_module
class RetinaHead(AnchorHead):
    """Anchor-based RetinaNet head (https://arxiv.org/pdf/1708.02002.pdf).

    Two parallel conv towers: one classifies anchor boxes, the other
    regresses box deltas.

    Example:
        >>> import torch
        >>> self = RetinaHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == (self.num_classes - 1)
        >>> assert box_per_anchor == 4
    """

    def __init__(self, num_classes, in_channels, stacked_convs=4, octave_base_scale=4, scales_per_octave=3, conv_cfg=None, norm_cfg=None, **kwargs):
        self.stacked_convs = stacked_convs
        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # anchor scale i = octave_base_scale * 2**(i / scales_per_octave)
        octave_scales = np.array([2 ** (i / scales_per_octave) for i in range(scales_per_octave)])
        anchor_scales = octave_scales * octave_base_scale
        super(RetinaHead, self).__init__(num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)

    def _init_layers(self):
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for depth in range(self.stacked_convs):
            in_ch = self.in_channels if depth == 0 else self.feat_channels
            self.cls_convs.append(ConvModule(in_ch, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            self.reg_convs.append(ConvModule(in_ch, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        self.retina_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1)
        self.retina_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        # Focal-loss prior for the classification bias.
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward_single(self, x):
        cls_feat = x
        reg_feat = x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
|
@HEADS.register_module
class RetinaSepBNHead(AnchorHead):
    """RetinaHead with separate BN.

    Conv weights are shared across FPN levels, but each level keeps its
    own BN layers (hence one tower per input level with tied convs).
    """

    def __init__(self, num_classes, num_ins, in_channels, stacked_convs=4, octave_base_scale=4, scales_per_octave=3, conv_cfg=None, norm_cfg=None, **kwargs):
        self.stacked_convs = stacked_convs
        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.num_ins = num_ins
        octave_scales = np.array([2 ** (i / scales_per_octave) for i in range(scales_per_octave)])
        anchor_scales = octave_scales * octave_base_scale
        super(RetinaSepBNHead, self).__init__(num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)

    def _init_layers(self):
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # Build one tower per input level.
        for _ in range(self.num_ins):
            cls_tower = nn.ModuleList()
            reg_tower = nn.ModuleList()
            for depth in range(self.stacked_convs):
                chn = self.in_channels if depth == 0 else self.feat_channels
                cls_tower.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
                reg_tower.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            self.cls_convs.append(cls_tower)
            self.reg_convs.append(reg_tower)
        # Tie the conv weights of every level to level 0 (BN stays separate).
        for depth in range(self.stacked_convs):
            for level in range(1, self.num_ins):
                self.cls_convs[level][depth].conv = self.cls_convs[0][depth].conv
                self.reg_convs[level][depth].conv = self.reg_convs[0][depth].conv
        self.retina_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1)
        self.retina_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        # Shared convs only need initializing once via level 0.
        for m in self.cls_convs[0]:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs[0]:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward(self, feats):
        cls_scores = []
        bbox_preds = []
        for level, feat in enumerate(feats):
            cls_feat = feat
            reg_feat = feat
            for conv in self.cls_convs[level]:
                cls_feat = conv(cls_feat)
            for conv in self.reg_convs[level]:
                reg_feat = conv(reg_feat)
            cls_scores.append(self.retina_cls(cls_feat))
            bbox_preds.append(self.retina_reg(reg_feat))
        return cls_scores, bbox_preds
|
@HEADS.register_module
class SSDHead(AnchorHead):
    def __init__(self, input_size=300, num_classes=81, in_channels=(512, 1024, 512, 256, 256, 256), anchor_strides=(8, 16, 32, 64, 100, 300), basesize_ratio_range=(0.1, 0.9), anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), target_means=(0.0, 0.0, 0.0, 0.0), target_stds=(1.0, 1.0, 1.0, 1.0)):
        """Build SSD prediction convs and per-level anchor generators.

        Prior-box sizes follow the SSD paper: base sizes are spread evenly
        over ``basesize_ratio_range`` (in percent of ``input_size``), with
        hard-coded smallest sizes for the canonical SSD300/SSD512 setups.
        """
        super(AnchorHead, self).__init__()
        self.input_size = input_size
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.cls_out_channels = num_classes
        # Anchors per level: 2 for each extra ratio (r and 1/r) plus the
        # two square boxes (scale 1 and sqrt(max/min)).
        num_anchors = [((len(ratios) * 2) + 2) for ratios in anchor_ratios]
        reg_convs = []
        cls_convs = []
        for i in range(len(in_channels)):
            reg_convs.append(nn.Conv2d(in_channels[i], (num_anchors[i] * 4), kernel_size=3, padding=1))
            cls_convs.append(nn.Conv2d(in_channels[i], (num_anchors[i] * num_classes), kernel_size=3, padding=1))
        self.reg_convs = nn.ModuleList(reg_convs)
        self.cls_convs = nn.ModuleList(cls_convs)
        # Express the size range in percent and step it across the levels
        # (the first level is handled specially below).
        (min_ratio, max_ratio) = basesize_ratio_range
        min_ratio = int((min_ratio * 100))
        max_ratio = int((max_ratio * 100))
        step = int((np.floor((max_ratio - min_ratio)) / (len(in_channels) - 2)))
        min_sizes = []
        max_sizes = []
        for r in range(int(min_ratio), (int(max_ratio) + 1), step):
            min_sizes.append(int(((input_size * r) / 100)))
            max_sizes.append(int(((input_size * (r + step)) / 100)))
        # Prepend the special first-level sizes from the SSD reference
        # configurations (SSD300 / SSD512).
        if (input_size == 300):
            if (basesize_ratio_range[0] == 0.15):
                min_sizes.insert(0, int(((input_size * 7) / 100)))
                max_sizes.insert(0, int(((input_size * 15) / 100)))
            elif (basesize_ratio_range[0] == 0.2):
                min_sizes.insert(0, int(((input_size * 10) / 100)))
                max_sizes.insert(0, int(((input_size * 20) / 100)))
        elif (input_size == 512):
            if (basesize_ratio_range[0] == 0.1):
                min_sizes.insert(0, int(((input_size * 4) / 100)))
                max_sizes.insert(0, int(((input_size * 10) / 100)))
            elif (basesize_ratio_range[0] == 0.15):
                min_sizes.insert(0, int(((input_size * 7) / 100)))
                max_sizes.insert(0, int(((input_size * 15) / 100)))
        self.anchor_generators = []
        self.anchor_strides = anchor_strides
        for k in range(len(anchor_strides)):
            base_size = min_sizes[k]
            stride = anchor_strides[k]
            # Center anchors on the pixel grid of this stride.
            ctr = (((stride - 1) / 2.0), ((stride - 1) / 2.0))
            scales = [1.0, np.sqrt((max_sizes[k] / min_sizes[k]))]
            ratios = [1.0]
            for r in anchor_ratios[k]:
                ratios += [(1 / r), r]
            anchor_generator = AnchorGenerator(base_size, scales, ratios, scale_major=False, ctr=ctr)
            # Reorder base anchors to match the Caffe SSD ordering
            # (big square box second).
            indices = list(range(len(ratios)))
            indices.insert(1, len(indices))
            anchor_generator.base_anchors = torch.index_select(anchor_generator.base_anchors, 0, torch.LongTensor(indices))
            self.anchor_generators.append(anchor_generator)
        self.target_means = target_means
        self.target_stds = target_stds
        self.use_sigmoid_cls = False
        self.cls_focal_loss = False
        self.fp16_enabled = False
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform', bias=0)
def forward(self, feats):
cls_scores = []
bbox_preds = []
for (feat, reg_conv, cls_conv) in zip(feats, self.reg_convs, self.cls_convs):
cls_scores.append(cls_conv(feat))
bbox_preds.append(reg_conv(feat))
return (cls_scores, bbox_preds)
def loss_single(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples, cfg):
loss_cls_all = (F.cross_entropy(cls_score, labels, reduction='none') * label_weights)
pos_inds = (labels > 0).nonzero().view((- 1))
neg_inds = (labels == 0).nonzero().view((- 1))
num_pos_samples = pos_inds.size(0)
num_neg_samples = (cfg.neg_pos_ratio * num_pos_samples)
if (num_neg_samples > neg_inds.size(0)):
num_neg_samples = neg_inds.size(0)
(topk_loss_cls_neg, _) = loss_cls_all[neg_inds].topk(num_neg_samples)
loss_cls_pos = loss_cls_all[pos_inds].sum()
loss_cls_neg = topk_loss_cls_neg.sum()
loss_cls = ((loss_cls_pos + loss_cls_neg) / num_total_samples)
loss_bbox = smooth_l1_loss(bbox_pred, bbox_targets, bbox_weights, beta=cfg.smoothl1_beta, avg_factor=num_total_samples)
return (loss_cls[None], loss_bbox)
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None):
        """Compute classification and bbox regression losses for a batch.

        Args:
            cls_scores (list[Tensor]): per-level class scores, each of shape
                (N, A*C, H, W) — assumed layout; confirm against forward().
            bbox_preds (list[Tensor]): per-level bbox deltas, each (N, A*4, H, W).
            gt_bboxes (list[Tensor]): ground-truth boxes, one tensor per image.
            gt_labels (list[Tensor]): ground-truth labels, one tensor per image.
            img_metas (list[dict]): per-image meta information.
            cfg: training config passed through to target assignment and
                ``loss_single``.
            gt_bboxes_ignore (list[Tensor], optional): boxes to exclude from
                assignment.

        Returns:
            dict: ``loss_cls`` and ``loss_bbox`` (one entry per image), or
            ``None`` when target computation fails.
        """
        # Spatial size (H, W) of every prediction level.
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == len(self.anchor_generators))
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        # sampling=False: negatives are mined inside loss_single (topk of
        # per-anchor losses) rather than by a bbox sampler.
        cls_reg_targets = anchor_target(anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, sampling=False, unmap_outputs=False)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
        num_images = len(img_metas)
        # Flatten all levels into per-image tensors:
        # (N, total_anchors, C) for scores, (N, total_anchors, 4) for boxes.
        all_cls_scores = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, (- 1), self.cls_out_channels) for s in cls_scores], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list, (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4) for b in bbox_preds], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list, (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list, (- 2)).view(num_images, (- 1), 4)
        # Fail fast if training has gone numerically unstable.
        assert torch.isfinite(all_cls_scores).all().item(), 'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), 'bbox predications become infinite or NaN!'
        # multi_apply maps loss_single over the leading (image) dimension.
        (losses_cls, losses_bbox) = multi_apply(self.loss_single, all_cls_scores, all_bbox_preds, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos, cfg=cfg)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
|
class HRModule(nn.Module):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks.
    Fusion/Exchange is in this module.
    """

    def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')):
        super(HRModule, self).__init__()
        self._check_branches(num_branches, num_blocks, in_channels, num_channels)
        self.in_channels = in_channels
        self.num_branches = num_branches
        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        # One residual branch per resolution, then fuse layers that exchange
        # information between every pair of resolutions.
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        # NOTE(review): inplace=False here, unlike the residual blocks —
        # presumably to avoid in-place mutation of the fused sums; confirm.
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels, num_channels):
        """Raise ValueError when a per-branch config list disagrees in length
        with ``num_branches``."""
        if (num_branches != len(num_blocks)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks))
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels))
            raise ValueError(error_msg)
        if (num_branches != len(in_channels)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(in_channels))
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Build the residual-block stack for a single branch."""
        downsample = None
        # Project the shortcut when stride or channel count changes.
        if ((stride != 1) or (self.in_channels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.in_channels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (num_channels[branch_index] * block.expansion))[1])
        layers = []
        layers.append(block(self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        # Record the branch's new channel count (mutates self.in_channels).
        self.in_channels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build one residual branch per resolution."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build cross-resolution fusion layers.

        ``fuse_layers[i][j]`` maps branch j's output to branch i's
        resolution/channels: 1x1 conv + nearest upsample when j is coarser
        (j > i), identity (None) when j == i, and a chain of stride-2 3x3
        convs when j is finer (j < i).
        """
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        # With multiscale_output=False only the highest-resolution output is
        # produced, so only one row of fuse layers is needed.
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    # Coarser -> finer: channel projection then upsample by 2**(j-i).
                    fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample(scale_factor=(2 ** (j - i)), mode='nearest')))
                elif (j == i):
                    fuse_layer.append(None)
                else:
                    # Finer -> coarser: (i - j) stride-2 convs; only the last
                    # one changes the channel count (and has no ReLU).
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
                        else:
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Run each branch, then fuse across resolutions.

        Args:
            x (list[Tensor]): one feature map per branch.
                NOTE(review): branch outputs are written back into this list
                in place, so the caller's list is mutated.

        Returns:
            list[Tensor]: fused outputs (one per output branch).
        """
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            # Sum contributions from every branch, resampled to branch i.
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
|
@BACKBONES.register_module
class HRNet(nn.Module):
    """HRNet backbone.

    High-Resolution Representations for Labeling Pixels and Regions
    arXiv: https://arxiv.org/abs/1904.04514

    Args:
        extra (dict): detailed configuration for each stage of HRNet.
        in_channels (int): Number of input image channels. Normally 3.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm
            layer in resblocks to let them behave as identity.

    Example:
        >>> from mmdet.models import HRNet
        >>> import torch
        >>> extra = dict(
        >>>     stage1=dict(
        >>>         num_modules=1,
        >>>         num_branches=1,
        >>>         block='BOTTLENECK',
        >>>         num_blocks=(4, ),
        >>>         num_channels=(64, )),
        >>>     stage2=dict(
        >>>         num_modules=1,
        >>>         num_branches=2,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4),
        >>>         num_channels=(32, 64)),
        >>>     stage3=dict(
        >>>         num_modules=4,
        >>>         num_branches=3,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4),
        >>>         num_channels=(32, 64, 128)),
        >>>     stage4=dict(
        >>>         num_modules=3,
        >>>         num_branches=4,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4, 4),
        >>>         num_channels=(32, 64, 128, 256)))
        >>> self = HRNet(extra, in_channels=1)
        >>> self.eval()
        >>> inputs = torch.rand(1, 1, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 32, 8, 8)
        (1, 64, 4, 4)
        (1, 128, 2, 2)
        (1, 256, 1, 1)
    """

    # Maps the config's block name to the block class.
    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}

    def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False):
        super(HRNet, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual
        # Stem: two stride-2 3x3 convs -> 1/4 resolution, 64 channels.
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, 64, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, 64, postfix=2)
        self.conv1 = build_conv_layer(self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: a single high-resolution branch of residual blocks.
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]
        block = self.blocks_dict[block_type]
        stage1_out_channels = (num_channels * block.expansion)
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        # Stages 2-4: each adds one lower-resolution branch via a transition
        # layer, then runs parallel HRModules over all branches.
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']
        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']
        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']
        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels)

    @property
    def norm1(self):
        """The stem's first norm layer (registered under a config-dependent name)."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """The stem's second norm layer (registered under a config-dependent name)."""
        return getattr(self, self.norm2_name)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch transitions between two consecutive stages.

        Existing branches get a 3x3 conv only when their channel count
        changes (otherwise None = identity); each new, lower-resolution
        branch is created from the previous stage's last branch via one or
        more stride-2 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(build_conv_layer(self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # New branch: downsample from the previous stage's coarsest
                # branch; only the final conv changes the channel count.
                conv_downsamples = []
                for j in range(((i + 1) - num_branches_pre)):
                    in_channels = num_channels_pre_layer[(- 1)]
                    out_channels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else in_channels)
                    conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Build a plain residual stage (used for stage 1)."""
        downsample = None
        # Project the shortcut when stride or channel count changes.
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (planes * block.expansion))[1])
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, in_channels, multiscale_output=True):
        """Build ``num_modules`` HRModules for one stage.

        Returns:
            tuple: (nn.Sequential of HRModules, output channels per branch).
        """
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = self.blocks_dict[layer_config['block']]
        hr_modules = []
        for i in range(num_modules):
            # Only the last module of the stage may drop multi-scale outputs.
            if ((not multiscale_output) and (i == (num_modules - 1))):
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True
            hr_modules.append(HRModule(num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        return (nn.Sequential(*hr_modules), in_channels)

    def init_weights(self, pretrained=None):
        """Load pretrained weights if a path is given, otherwise use
        Kaiming init for convs and constant init for norm layers."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.zero_init_residual:
                # Zero-init the last norm of each block so the residual
                # branch starts as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run stem, stage 1, then stages 2-4 with their transitions.

        Returns:
            list[Tensor]: one feature map per branch of the final stage.
        """
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)
        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        # Non-identity transitions are fed the coarsest previous output
        # (y_list[-1]); identity branches pass through unchanged.
        for i in range(self.stage3_cfg['num_branches']):
            if (self.transition2[i] is not None):
                x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if (self.transition3[i] is not None):
                x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)
        return y_list

    def train(self, mode=True):
        """Set train/eval mode; optionally keep norm layers in eval mode
        (frozen running stats) when ``norm_eval`` is True."""
        super(HRNet, self).train(mode)
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
class BasicBlock(nn.Module):
    """Basic residual block: two 3x3 convs with norm/ReLU plus an identity
    (or ``downsample``-projected) shortcut.

    Plugins (dcn/gcb/gen_attention) and checkpointing are not supported;
    the corresponding arguments exist only for signature compatibility.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None):
        super(BasicBlock, self).__init__()
        assert (dcn is None), 'Not implemented yet.'
        assert (gen_attention is None), 'Not implemented yet.'
        assert (gcb is None), 'Not implemented yet.'
        # Norm layers are registered under config-dependent names; keep the
        # names so the `norm1`/`norm2` properties can retrieve them.
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert (not with_cp)

    @property
    def norm1(self):
        """First norm layer (looked up by its registered name)."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """Second norm layer (looked up by its registered name)."""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """conv1-norm1-relu-conv2-norm2, add the shortcut, final relu."""
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.norm2(self.conv2(out))
        shortcut = x if self.downsample is None else self.downsample(x)
        out = out + shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 reduce, 3x3, 1x1 expand) with optional
    DCN/GCB/GeneralizedAttention plugins and gradient checkpointing."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert (style in ['pytorch', 'caffe'])
        assert ((dcn is None) or isinstance(dcn, dict))
        assert ((gcb is None) or isinstance(gcb, dict))
        assert ((gen_attention is None) or isinstance(gen_attention, dict))
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = (dcn is not None)
        self.gcb = gcb
        self.with_gcb = (gcb is not None)
        self.gen_attention = gen_attention
        self.with_gen_attention = (gen_attention is not None)
        # Decide which conv carries the stride (see docstring above).
        if (self.style == 'pytorch'):
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1
        (self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(norm_cfg, (planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            # NOTE: pop() mutates the caller-provided dcn dict.
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if ((not self.with_dcn) or fallback_on_stride):
            self.conv2 = build_conv_layer(conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
        else:
            assert (self.conv_cfg is None), 'conv_cfg cannot be None for DCN'
            self.conv2 = build_conv_layer(dcn, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(conv_cfg, planes, (planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        if self.with_gcb:
            # Context block operates on the expanded (out) channels.
            gcb_inplanes = (planes * self.expansion)
            self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(planes, **gen_attention)

    @property
    def norm1(self):
        """First norm layer (looked up by its registered name)."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """Second norm layer (looked up by its registered name)."""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """Third norm layer (looked up by its registered name)."""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Residual forward; the main path may be gradient-checkpointed."""
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_gen_attention:
                out = self.gen_attention_block(out)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_gcb:
                out = self.context_block(out)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out
        # Checkpointing trades compute for memory; only valid when x
        # requires grad (cp.checkpoint needs a grad-tracking input).
        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        # Final activation is applied outside the checkpointed section.
        out = self.relu(out)
        return out
|
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None, gen_attention_blocks=()):
    """Stack ``blocks`` residual blocks into one ResNet stage.

    Fix: ``gen_attention_blocks`` previously defaulted to a mutable list
    (``[]``); it now defaults to an immutable tuple. Behavior is unchanged
    since the argument is only used for membership tests.

    Args:
        block: residual block class (``BasicBlock`` or ``Bottleneck``).
        inplanes (int): input channels of the stage.
        planes (int): base channels; stage output is ``planes * block.expansion``.
        blocks (int): number of blocks in the stage.
        stride (int): stride of the first block (spatial downsampling).
        dilation (int): dilation passed to every block.
        gen_attention_blocks: indices of blocks that receive the
            ``gen_attention`` plugin config.

    Returns:
        nn.Sequential: the assembled stage.
    """
    downsample = None
    # Project the shortcut when the spatial size or channel count changes.
    if ((stride != 1) or (inplanes != (planes * block.expansion))):
        downsample = nn.Sequential(build_conv_layer(conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(norm_cfg, (planes * block.expansion))[1])
    layers = []
    # Only the first block downsamples; subsequent blocks use stride 1 and
    # take the expanded channel count as input.
    layers.append(block(inplanes=inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, gen_attention=(gen_attention if (0 in gen_attention_blocks) else None)))
    inplanes = (planes * block.expansion)
    for i in range(1, blocks):
        layers.append(block(inplanes=inplanes, planes=planes, stride=1, dilation=dilation, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, gen_attention=(gen_attention if (i in gen_attention_blocks) else None)))
    return nn.Sequential(*layers)
|
@BACKBONES.register_module
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm
            layer in resblocks to let them behave as identity.

    Example:
        >>> from mmdet.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, depth, in_channels=3, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), gcb=None, stage_with_gcb=(False, False, False, False), gen_attention=None, stage_with_gen_attention=((), (), (), ()), with_cp=False, zero_init_residual=True):
        super(ResNet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.gen_attention = gen_attention
        self.gcb = gcb
        self.stage_with_gcb = stage_with_gcb
        if (gcb is not None):
            assert (len(stage_with_gcb) == num_stages)
        self.zero_init_residual = zero_init_residual
        (self.block, stage_blocks) = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64
        self._make_stem_layer(in_channels)
        # Build residual stages; each is registered as `layer{i+1}` and the
        # attribute names are kept for lookup in forward().
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Plugins are enabled per stage via the stage_with_* switches.
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            gcb = (self.gcb if self.stage_with_gcb[i] else None)
            planes = (64 * (2 ** i))
            res_layer = make_res_layer(self.block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, gen_attention=gen_attention, gen_attention_blocks=stage_with_gen_attention[i])
            self.inplanes = (planes * self.block.expansion)
            layer_name = 'layer{}'.format((i + 1))
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # Channel count of the final stage's output.
        self.feat_dim = ((self.block.expansion * 64) * (2 ** (len(self.stage_blocks) - 1)))

    @property
    def norm1(self):
        """The stem's norm layer (registered under a config-dependent name)."""
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels):
        """Build the stem: 7x7 stride-2 conv, norm, ReLU, 3x3 max-pool."""
        self.conv1 = build_conv_layer(self.conv_cfg, in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Freeze the stem (stage 0) and stages 1..frozen_stages: eval mode
        plus requires_grad=False on all their parameters."""
        if (self.frozen_stages >= 0):
            self.norm1.eval()
            for m in [self.conv1, self.norm1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, 'layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Load pretrained weights if a path is given; otherwise Kaiming-init
        convs, constant-init norms, and zero-init DCN offsets / residual
        norm layers where configured."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if (self.dcn is not None):
                # Start deformable offsets at zero (regular conv behavior).
                for m in self.modules():
                    if (isinstance(m, Bottleneck) and hasattr(m.conv2, 'conv_offset')):
                        constant_init(m.conv2.conv_offset, 0)
            if self.zero_init_residual:
                # Zero-init each block's last norm so the residual branch
                # starts as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run stem then all stages; collect the outputs listed in
        ``out_indices``.

        Returns:
            tuple[Tensor]: one feature map per requested stage.
        """
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Set train/eval mode, re-freeze configured stages, and optionally
        keep all BatchNorm layers in eval mode (``norm_eval``)."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
class Bottleneck(_Bottleneck):
    """ResNeXt bottleneck: grouped-convolution variant of the ResNet
    bottleneck. Rebuilds the parent's conv/norm layers with a grouped
    width of ``floor(planes * base_width / 64) * groups``."""

    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # groups == 1 degenerates to the plain ResNet bottleneck width.
        if (groups == 1):
            width = self.planes
        else:
            width = (math.floor((self.planes * (base_width / 64))) * groups)
        # Re-register the norm layers with the grouped width; add_module with
        # the same names replaces the ones created by the parent __init__.
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, width, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            # NOTE: pop() mutates the shared dcn config dict.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if ((not self.with_dcn) or fallback_on_stride):
            self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        else:
            assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
|
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, groups=1, base_width=4, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None):
    """Stack ``blocks`` grouped (ResNeXt) bottleneck blocks into one stage.

    The first block carries the stride and an optional projection shortcut;
    the remaining blocks use stride 1 and the expanded channel count.

    Returns:
        nn.Sequential: the assembled stage.
    """
    out_channels = (planes * block.expansion)
    shortcut = None
    # Project the shortcut when the spatial size or channel count changes.
    if ((stride != 1) or (inplanes != out_channels)):
        shortcut = nn.Sequential(build_conv_layer(conv_cfg, inplanes, out_channels, kernel_size=1, stride=stride, bias=False), build_norm_layer(norm_cfg, out_channels)[1])
    # Arguments shared by every block of the stage.
    common = dict(planes=planes, dilation=dilation, groups=groups, base_width=base_width, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb)
    stage = [block(inplanes=inplanes, stride=stride, downsample=shortcut, **common)]
    stage += [block(inplanes=out_channels, stride=1, **common) for _ in range(1, blocks)]
    return nn.Sequential(*stage)
|
@BACKBONES.register_module
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm
            layer in resblocks to let them behave as identity.

    Example:
        >>> from mmdet.models import ResNeXt
        >>> import torch
        >>> self = ResNeXt(depth=50)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    # depth -> (grouped Bottleneck class, number of blocks per stage)
    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, groups=1, base_width=4, **kwargs):
        super(ResNeXt, self).__init__(**kwargs)
        self.groups = groups
        self.base_width = base_width
        # Rebuild all residual stages with grouped convolutions; add_module
        # with the same `layer{i}` names replaces the stages the parent
        # ResNet.__init__ already registered.
        self.inplanes = 64
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            gcb = (self.gcb if self.stage_with_gcb[i] else None)
            planes = (64 * (2 ** i))
            res_layer = make_res_layer(self.block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, groups=self.groups, base_width=self.base_width, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, gcb=gcb)
            self.inplanes = (planes * self.block.expansion)
            layer_name = 'layer{}'.format((i + 1))
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        # Re-apply freezing to the newly built layers.
        self._freeze_stages()
|
@HEADS.register_module
class ConvFCBBoxHead(BBoxHead):
    """More general bbox head, with shared conv and fc layers and two optional
    separated branches.

                                /-> cls convs -> cls fcs -> cls
    shared convs -> shared fcs
                                \\-> reg convs -> reg fcs -> reg
    """

    def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, *args, **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # The head must contain at least one conv or fc layer overall.
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) + num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        # Branch convs operate on spatial maps, so they cannot come after
        # shared fcs (which flatten the RoI features).
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            assert (num_shared_fcs == 0)
        # A disabled cls/reg output must not have a dedicated branch.
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Shared trunk applied to every RoI before branching.
        (self.shared_convs, self.shared_fcs, last_layer_dim) = self._add_conv_fc_branch(self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim
        # Optional separated classification / regression branches.
        (self.cls_convs, self.cls_fcs, self.cls_last_dim) = self._add_conv_fc_branch(self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
        (self.reg_convs, self.reg_fcs, self.reg_last_dim) = self._add_conv_fc_branch(self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
        # If no fc flattened the features yet (and avg pool is off), the
        # final linear layers see the full RoI spatial area.
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= self.roi_feat_area
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= self.roi_feat_area
        self.relu = nn.ReLU(inplace=True)
        # Output layers: class scores, and 4 box deltas per class unless
        # class-agnostic regression is enabled.
        if self.with_cls:
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)

    def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs

        Returns:
            tuple: (conv ModuleList, fc ModuleList, output channel count).
        """
        last_layer_dim = in_channels
        # 3x3 convs keep the spatial resolution (padding=1).
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
                branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # The first fc of the first flattening branch consumes the whole
            # RoI map (channels * H * W) unless avg pooling is used.
            if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
                branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)

    def init_weights(self):
        """Initialize fc layers with Xavier-uniform weights and zero bias;
        conv layers are handled by ConvModule / the base class."""
        super(ConvFCBBoxHead, self).init_weights()
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Forward RoI features through the shared trunk, then each branch.

        Args:
            x (Tensor): RoI features; presumably (num_rois, C, H, W) — TODO
                confirm against the RoI extractor output.

        Returns:
            tuple: (cls_score or None, bbox_pred or None).
        """
        # Shared trunk.
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)
        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # Separate branches; x may be 2-d (after fcs) or 4-d (conv map).
        x_cls = x
        x_reg = x
        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            # Flatten spatial dims before the fc layers.
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))
        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))
        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
|
@HEADS.register_module
class SharedFCBBoxHead(ConvFCBBoxHead):
    """Bbox head whose layer stack is reduced to ``num_fcs`` shared fc
    layers only — no convs and no separate cls/reg branches."""

    def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
        # A purely-fc head needs at least one shared fc layer.
        assert num_fcs >= 1
        super(SharedFCBBoxHead, self).__init__(
            *args,
            num_shared_convs=0,
            num_shared_fcs=num_fcs,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            **kwargs)
|
class BasicResBlock(nn.Module):
    """Basic residual block.

    This block is a little different from the block in the ResNet backbone:
    the second conv is 1x1 here (while the first is 3x3), and the identity
    path always goes through a 1x1 projection conv.

    Args:
        in_channels (int): Channels of the input feature map.
        out_channels (int): Channels of the output feature map.
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
    """

    def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN')):
        super(BasicResBlock, self).__init__()
        # Main path: 3x3 conv (with activation) followed by a 1x1 conv whose
        # activation is deferred until after the residual add (act_cfg=None).
        self.conv1 = ConvModule(in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        self.conv2 = ConvModule(in_channels, out_channels, kernel_size=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        # Identity path: 1x1 projection so channel counts match at the add.
        self.conv_identity = ConvModule(in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Residual forward: relu(conv2(conv1(x)) + conv_identity(x))."""
        shortcut = self.conv_identity(x)
        out = self.conv2(self.conv1(x))
        return self.relu(out + shortcut)
|
@HEADS.register_module
class DoubleConvFCBBoxHead(BBoxHead):
    """Bbox head used in Double-Head R-CNN.

                                  /-> cls
                /-> shared convs ->
                                  \\-> reg
    roi features
                                  /-> cls
                \\-> shared fc   ->
                                  \\-> reg
    """

    def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs):
        # Double-Head always spatially averages the conv branch output.
        kwargs.setdefault('with_avg_pool', True)
        super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
        assert self.with_avg_pool
        assert (num_convs > 0)
        assert (num_fcs > 0)
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Conv (regression) branch: residual stem + num_convs bottlenecks.
        self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels)
        self.conv_branch = self._add_conv_branch()
        # Fc (classification) branch.
        self.fc_branch = self._add_fc_branch()
        # 4 deltas total, or 4 per class when not class-agnostic.
        out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
        self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes)
        self.relu = nn.ReLU(inplace=True)

    def _add_conv_branch(self):
        """Add the conv branch, which consists of a sequence of bottleneck
        blocks (fixed width, planes = conv_out_channels // 4)."""
        branch_convs = nn.ModuleList()
        for i in range(self.num_convs):
            branch_convs.append(Bottleneck(inplanes=self.conv_out_channels, planes=(self.conv_out_channels // 4), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        return branch_convs

    def _add_fc_branch(self):
        """Add the fc branch which consists of a sequential of fc layers."""
        branch_fcs = nn.ModuleList()
        for i in range(self.num_fcs):
            # The first fc consumes the flattened RoI features.
            fc_in_channels = ((self.in_channels * self.roi_feat_area) if (i == 0) else self.fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
        return branch_fcs

    def init_weights(self):
        """Initialize output heads with small normal noise and the fc
        branch with Xavier-uniform weights."""
        normal_init(self.fc_cls, std=0.01)
        normal_init(self.fc_reg, std=0.001)
        for m in self.fc_branch.modules():
            if isinstance(m, nn.Linear):
                xavier_init(m, distribution='uniform')

    def forward(self, x_cls, x_reg):
        """Forward the two RoI feature sets through their branches.

        Args:
            x_cls (Tensor): RoI features for the classification branch.
            x_reg (Tensor): RoI features for the regression branch.

        Returns:
            tuple: (cls_score, bbox_pred).
        """
        # Conv branch -> bbox regression.
        x_conv = self.res_block(x_reg)
        for conv in self.conv_branch:
            x_conv = conv(x_conv)
        if self.with_avg_pool:
            x_conv = self.avg_pool(x_conv)
        x_conv = x_conv.view(x_conv.size(0), (- 1))
        bbox_pred = self.fc_reg(x_conv)
        # Fc branch -> classification.
        x_fc = x_cls.view(x_cls.size(0), (- 1))
        for fc in self.fc_branch:
            x_fc = self.relu(fc(x_fc))
        cls_score = self.fc_cls(x_fc)
        return (cls_score, bbox_pred)
|
def build(cfg, registry, default_args=None):
    """Build a module (or a ``nn.Sequential`` of modules) from config.

    Args:
        cfg (dict | list[dict]): Config of the module(s) to build.
        registry (Registry): Registry in which the module type is looked up.
        default_args (dict, optional): Default kwargs merged into the config.

    Returns:
        nn.Module: The built module, wrapped in ``nn.Sequential`` when
        ``cfg`` is a list of configs.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    layers = [build_from_cfg(item, registry, default_args) for item in cfg]
    return nn.Sequential(*layers)
|
def build_backbone(cfg):
    """Build a backbone network from its config dict."""
    return build(cfg, BACKBONES)
|
def build_neck(cfg):
    """Build a neck (e.g. FPN) from its config dict."""
    return build(cfg, NECKS)
|
def build_roi_extractor(cfg):
    """Build a RoI feature extractor from its config dict."""
    return build(cfg, ROI_EXTRACTORS)
|
def build_shared_head(cfg):
    """Build a shared head from its config dict."""
    return build(cfg, SHARED_HEADS)
|
def build_head(cfg):
    """Build a head (bbox/mask/etc.) from its config dict."""
    return build(cfg, HEADS)
|
def build_loss(cfg):
    """Build a loss module from its config dict."""
    return build(cfg, LOSSES)
|
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector from config, injecting train/test configs as
    default constructor arguments."""
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
@DETECTORS.register_module
class ATSS(SingleStageDetector):
    """Single-stage detector with an ATSS bbox head; all work is delegated
    to :class:`SingleStageDetector`."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
|
@DETECTORS.register_module
class DoubleHeadRCNN(TwoStageDetector):
    """Two-stage detector that extracts separate RoI features for the
    classification and regression branches (Double-Head R-CNN style).

    Args:
        reg_roi_scale_factor (float): Scale factor applied to RoIs when
            extracting features for the regression branch.
    """

    def __init__(self, reg_roi_scale_factor, **kwargs):
        super().__init__(**kwargs)
        self.reg_roi_scale_factor = reg_roi_scale_factor

    def forward_dummy(self, img):
        """Dummy forward (e.g. for FLOPs computation): uses 1000 random
        proposals instead of running the full RPN post-processing."""
        outs = ()
        x = self.extract_feat(img)
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = (outs + (rpn_outs,))
        proposals = torch.randn(1000, 4).to(device=img.device)
        rois = bbox2roi([proposals])
        # Two feature sets: plain RoIs for cls, rescaled RoIs for reg.
        bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
        bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor)
        if self.with_shared_head:
            bbox_cls_feats = self.shared_head(bbox_cls_feats)
            bbox_reg_feats = self.shared_head(bbox_reg_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
        outs += (cls_score, bbox_pred)
        return outs

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None):
        """Training forward: RPN (optional) -> assign/sample proposals ->
        bbox head (and mask head if present). Returns a dict of losses."""
        x = self.extract_feat(img)
        losses = dict()
        # RPN forward + loss; also generates proposals for the RoI heads.
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            rpn_loss_inputs = (rpn_outs + (gt_bboxes, img_metas, self.train_cfg.rpn))
            rpn_losses = self.rpn_head.loss(*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
            losses.update(rpn_losses)
            proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn)
            proposal_inputs = (rpn_outs + (img_metas, proposal_cfg))
            proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
        else:
            proposal_list = proposals
        # Assign ground truth to proposals and sample pos/neg per image.
        if (self.with_bbox or self.with_mask):
            bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
            bbox_sampler = build_sampler(self.train_cfg.rcnn.sampler, context=self)
            num_imgs = img.size(0)
            if (gt_bboxes_ignore is None):
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            for i in range(num_imgs):
                assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i])
                sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x])
                sampling_results.append(sampling_result)
        # Bbox head: separate features for cls (plain) and reg (rescaled).
        if self.with_bbox:
            rois = bbox2roi([res.bboxes for res in sampling_results])
            bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
            bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor)
            if self.with_shared_head:
                bbox_cls_feats = self.shared_head(bbox_cls_feats)
                bbox_reg_feats = self.shared_head(bbox_reg_feats)
            (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
            bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
            loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
            losses.update(loss_bbox)
        # Mask head (optional); reuses the cls features when the RoI
        # extractor is shared between bbox and mask heads.
        if self.with_mask:
            if (not self.share_roi_extractor):
                pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
                mask_feats = self.mask_roi_extractor(x[:self.mask_roi_extractor.num_inputs], pos_rois)
                if self.with_shared_head:
                    mask_feats = self.shared_head(mask_feats)
            else:
                # Build a boolean mask selecting positive-sample rows of
                # bbox_cls_feats (pos first, then neg, per image).
                pos_inds = []
                device = bbox_cls_feats.device
                for res in sampling_results:
                    pos_inds.append(torch.ones(res.pos_bboxes.shape[0], device=device, dtype=torch.uint8))
                    pos_inds.append(torch.zeros(res.neg_bboxes.shape[0], device=device, dtype=torch.uint8))
                pos_inds = torch.cat(pos_inds)
                mask_feats = bbox_cls_feats[pos_inds]
            mask_pred = self.mask_head(mask_feats)
            mask_targets = self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn)
            pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
            loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels)
            losses.update(loss_mask)
        return losses

    def simple_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False):
        """Test only det bboxes without augmentation."""
        rois = bbox2roi(proposals)
        # Same dual feature extraction as in training.
        bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
        bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor)
        if self.with_shared_head:
            bbox_cls_feats = self.shared_head(bbox_cls_feats)
            bbox_reg_feats = self.shared_head(bbox_reg_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
        img_shape = img_metas[0]['img_shape']
        scale_factor = img_metas[0]['scale_factor']
        (det_bboxes, det_labels) = self.bbox_head.get_det_bboxes(rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg)
        return (det_bboxes, det_labels)
|
@DETECTORS.register_module
class FastRCNN(TwoStageDetector):
    """Fast R-CNN: a two-stage detector that consumes precomputed
    proposals instead of generating them with an RPN."""

    def __init__(self, backbone, bbox_roi_extractor, bbox_head, train_cfg, test_cfg, neck=None, shared_head=None, mask_roi_extractor=None, mask_head=None, pretrained=None):
        super(FastRCNN, self).__init__(backbone=backbone, neck=neck, shared_head=shared_head, bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, pretrained=pretrained)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Run inference with externally supplied proposals.

        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
            proposals (List[List[Tensor]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. The Tensor should have a shape Px4, where
                P is the number of proposals.

        Raises:
            TypeError: If ``imgs`` or ``img_metas`` is not a list.
            ValueError: If the number of augmentations and metas differ.
            NotImplementedError: If more than one augmentation is given.
        """
        for (var, name) in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if (not isinstance(var, list)):
                raise TypeError('{} must be a list, but got {}'.format(name, type(var)))
        num_augs = len(imgs)
        if (num_augs != len(img_metas)):
            raise ValueError('num of augmentations ({}) != num of image meta ({})'.format(len(imgs), len(img_metas)))
        # Only single-image batches are supported at test time.
        imgs_per_gpu = imgs[0].size(0)
        assert (imgs_per_gpu == 1)
        if (num_augs == 1):
            return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs)
        # BUG FIX: this was `assert NotImplementedError`, which always
        # passes (the exception class object is truthy) and silently
        # returned None for multi-aug inputs. Raise explicitly instead.
        raise NotImplementedError(
            'aug_test with precomputed proposals is not supported')
|
@DETECTORS.register_module
class FasterRCNN(TwoStageDetector):
    """Faster R-CNN: a two-stage detector with an RPN and a bbox head; all
    logic lives in :class:`TwoStageDetector`."""

    def __init__(self, backbone, rpn_head, bbox_roi_extractor, bbox_head, train_cfg, test_cfg, neck=None, shared_head=None, pretrained=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)
|
@DETECTORS.register_module
class FCOS(SingleStageDetector):
    """FCOS: a fully-convolutional single-stage detector; all work is
    delegated to :class:`SingleStageDetector`."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
|
@DETECTORS.register_module
class FOVEA(SingleStageDetector):
    """FoveaBox single-stage detector; all work is delegated to
    :class:`SingleStageDetector`."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
|
@DETECTORS.register_module
class MaskRCNN(TwoStageDetector):
    """Mask R-CNN: Faster R-CNN plus a mask branch; all logic lives in
    :class:`TwoStageDetector`."""

    def __init__(self, backbone, rpn_head, bbox_roi_extractor, bbox_head, mask_roi_extractor, mask_head, train_cfg, test_cfg, neck=None, shared_head=None, pretrained=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)
|
@DETECTORS.register_module
class RetinaNet(SingleStageDetector):
    """RetinaNet single-stage detector; all work is delegated to
    :class:`SingleStageDetector`."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
|
@DETECTORS.register_module
class SingleStageDetector(BaseDetector):
    """Base class for single-stage detectors.

    Single-stage detectors directly and densely predict bounding boxes on
    the output features of the backbone+neck.
    """

    def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None):
        super(SingleStageDetector, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        # The neck (e.g. FPN) is optional.
        if (neck is not None):
            self.neck = builder.build_neck(neck)
        self.bbox_head = builder.build_head(bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize backbone (optionally from ``pretrained``), neck and
        bbox head weights."""
        super(SingleStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            # A neck may be a single module or an nn.Sequential of modules.
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        self.bbox_head.init_weights()

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/get_flops.py`
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        return outs

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
        """Run a training forward pass and return the loss dict computed
        by the bbox head."""
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        loss_inputs = (outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg))
        losses = self.bbox_head.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test without augmentation; returns per-class bbox results for
        the first (single) image of the batch."""
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        bbox_inputs = (outs + (img_metas, self.test_cfg, rescale))
        bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
        bbox_results = [bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for (det_bboxes, det_labels) in bbox_list]
        return bbox_results[0]

    def aug_test(self, imgs, img_metas, rescale=False):
        """Augmented testing is not implemented for this base class."""
        raise NotImplementedError
|
def accuracy(pred, target, topk=1):
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk,)
return_single = True
else:
return_single = False
maxk = max(topk)
(_, pred_label) = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, (- 1)).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / pred.size(0))))
return (res[0] if return_single else res)
|
class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`.

    Args:
        topk (tuple[int]): K value(s) forwarded to :func:`accuracy`.
    """

    def __init__(self, topk=(1,)):
        super().__init__()
        self.topk = topk

    def forward(self, pred, target):
        """Compute top-k accuracy of ``pred`` against ``target``."""
        return accuracy(pred, target, self.topk)
|
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'):
    """Balanced L1 loss (Libra R-CNN, arXiv:1904.02701), element-wise.

    Args:
        pred (Tensor): Predicted bbox deltas.
        target (Tensor): Target bbox deltas, same shape as ``pred``.
        beta (float): Threshold separating the log-shaped inlier region
            from the linear outlier region.
        alpha (float): Controls the gradient magnitude for inliers.
        gamma (float): Controls the overall gradient upper bound.
        reduction (str): Consumed by the ``weighted_loss`` decorator.

    Returns:
        Tensor: Element-wise loss with the same shape as ``pred``.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    # b is chosen so the two branches join continuously at diff == beta.
    b = np.e ** (gamma / alpha) - 1
    inlier = (alpha / b) * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff
    outlier = gamma * diff + gamma / b - alpha * beta
    loss = torch.where(diff < beta, inlier, outlier)
    return loss
|
@LOSSES.register_module
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): Inlier-gradient control parameter.
        gamma (float): Overall-gradient control parameter.
        beta (float): Inlier/outlier threshold.
        reduction (str): 'none' | 'mean' | 'sum'.
        loss_weight (float): Global weight multiplied onto the loss.
    """

    def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted balanced L1 loss.

        ``reduction_override``, when given, replaces the reduction that
        was configured at construction time.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        loss = balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * loss
|
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Cross-entropy classification loss with optional element weights.

    Args:
        pred (Tensor): Raw class scores (logits), shape (N, C).
        label (Tensor): Ground-truth class indices, shape (N,).
        weight (Tensor, optional): Per-element loss weights.
        reduction (str): 'none' | 'mean' | 'sum'.
        avg_factor (float, optional): Normalizer used when averaging.

    Returns:
        Tensor: The (possibly reduced) loss.
    """
    # Compute per-element losses first; weighting and reduction are
    # applied afterwards by weight_reduce_loss.
    raw = F.cross_entropy(pred, label, reduction='none')
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(raw, weight=weight, reduction=reduction, avg_factor=avg_factor)
|
# NOTE: the following lines are non-code residue from a scraped web page
# (dataset-viewer boilerplate); kept as comments so the file stays parseable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.