code stringlengths 17 6.64M |
|---|
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If the seed is not set, a seed is drawn at random on rank 0 and then
    broadcast to all other processes so that every process shares the same
    value (prevents subtle cross-rank inconsistencies).

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed

    rank, world_size = get_dist_info()
    seed = np.random.randint(2**31)
    if world_size == 1:
        return seed

    # Rank 0 owns the seed; everyone else receives it via broadcast.
    seed_tensor = torch.tensor(
        seed if rank == 0 else 0, dtype=torch.int32, device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()
|
def set_random_seed(seed, deterministic=False):
    """Seed every random number generator in use.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed Python, NumPy and PyTorch (CPU + all CUDA devices) alike.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
|
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Train a detector on one or more datasets driven by ``cfg``.

    Args:
        model (nn.Module): Detector to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one data
            loader is built per dataset, paired with ``cfg.workflow``.
        cfg (mmcv.Config): Full training config (data, optimizer, runner,
            hooks, resume/load settings, ...).
        distributed (bool): Wrap with MMDistributedDataParallel when True,
            otherwise MMDataParallel. Default: False.
        validate (bool): Register an evaluation hook over ``cfg.data.val``.
            Default: False.
        timestamp (str, optional): Attached to the runner (used by logging
            hooks, e.g. for log file naming). Default: None.
        meta (dict, optional): Extra metadata recorded by the runner.
            Default: None.
    """
    logger = get_root_logger(log_level=cfg.log_level)
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Backward compatibility: "imgs_per_gpu" was renamed "samples_per_gpu"
    # in MMDet V2.0. The legacy key is still honored (and wins when both
    # keys are present), with warnings.
    if ('imgs_per_gpu' in cfg.data):
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
        if ('samples_per_gpu' in cfg.data):
            logger.warning(f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and "samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiments')
            cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    # The dataloader needs the runner type to choose an appropriate sampler.
    runner_type = ('EpochBasedRunner' if ('runner' not in cfg) else cfg.runner['type'])
    data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=cfg.data.get('persistent_workers', False)) for ds in dataset]
    # Wrap the model for (distributed) multi-GPU execution.
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
    optimizer = build_optimizer(model, cfg.optimizer)
    # Backward compatibility: synthesize a `runner` section from the legacy
    # `total_epochs` key when it is missing; if both exist they must agree.
    if ('runner' not in cfg):
        cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
        warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
    elif ('total_epochs' in cfg):
        assert (cfg.total_epochs == cfg.runner.max_epochs)
    runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
    # Attach the caller's timestamp to the runner (consumed by logger hooks).
    runner.timestamp = timestamp
    # fp16 training replaces the plain optimizer hook when configured.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif (distributed and ('type' not in cfg.optimizer_config)):
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None))
    if distributed:
        # Presumably re-seeds the distributed sampler each epoch so the
        # shuffle differs between epochs — only meaningful per-epoch.
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())
    if validate:
        # With samples_per_gpu > 1 the val pipeline must avoid early
        # ImageToTensor conversion, hence the pipeline rewrite.
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if (val_samples_per_gpu > 1):
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
    # Checkpoint precedence: explicit cfg.resume_from > auto-resumed latest
    # checkpoint in work_dir > cfg.load_from (weights only).
    resume_from = None
    if ((cfg.resume_from is None) and cfg.get('auto_resume')):
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if (resume_from is not None):
        cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
|
def build_prior_generator(cfg, default_args=None):
    """Build a prior (point/anchor) generator from a registry config."""
    return build_from_cfg(cfg, PRIOR_GENERATORS, default_args=default_args)
|
def build_anchor_generator(cfg, default_args=None):
    """Deprecated alias of :func:`build_prior_generator`.

    Args:
        cfg (dict): Config of the generator to build.
        default_args (dict, optional): Default arguments merged into the
            config. Default: None.

    Returns:
        The built prior generator.
    """
    # Emit with an explicit DeprecationWarning category (previously the
    # default UserWarning) so deprecation filters can target it.
    warnings.warn(
        '``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` ',
        DeprecationWarning)
    return build_prior_generator(cfg, default_args=default_args)
|
@PRIOR_GENERATORS.register_module()
class PointGenerator:
    """Point generator for a single-level feature map."""

    def _meshgrid(self, x, y, row_major=True):
        """Tile 1-D ``x``/``y`` into flattened grid coordinates."""
        grid_x = x.repeat(len(y))
        grid_y = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (grid_x, grid_y) if row_major else (grid_y, grid_x)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Generate (x, y, stride) triples for every feature-map cell."""
        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0., feat_w, device=device) * stride
        shift_y = torch.arange(0., feat_h, device=device) * stride
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        # Broadcast the scalar stride to one entry per point.
        stride_col = shift_x.new_full((shift_xx.shape[0], ), stride)
        return torch.stack([shift_xx, shift_yy, stride_col], dim=-1).to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Boolean mask of points falling inside ``valid_size`` (h, w)."""
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        return valid_xx & valid_yy
|
@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator():
    """Standard points generator for multi-level (Mlvl) feature maps in 2D
    points-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        offset (float): The offset of points, the value is normalized with
            corresponding stride. Defaults to 0.5.
    """

    def __init__(self, strides, offset=0.5):
        # Every stride is normalized to a (stride_w, stride_h) pair.
        self.strides = [_pair(stride) for stride in strides]
        self.offset = offset

    @property
    def num_levels(self):
        """int: number of feature levels that the generator is applied to."""
        return len(self.strides)

    @property
    def num_base_priors(self):
        """list[int]: The number of priors (points) at each location on the
        feature grid, per level (always 1 for point-based heads)."""
        return [1 for _ in range(len(self.strides))]

    def _meshgrid(self, x, y, row_major=True):
        """Build flattened meshgrid coordinates from 1-D ``x`` and ``y``."""
        (yy, xx) = torch.meshgrid(y, x)
        if row_major:
            return (xx.reshape((- 1)), yy.reshape((- 1)))
        else:
            return (yy.reshape((- 1)), xx.reshape((- 1)))

    def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid points of multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): Feature map sizes in multiple
                levels, each size arranged as (h, w).
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str): The device where the points will be put on.
            with_stride (bool): Whether to concatenate the stride to the
                last dimension of points.

        Return:
            list[torch.Tensor]: Points of multiple feature levels. Each
                tensor is (N, 2) with N = width * height and last dim
                (coord_x, coord_y), or (N, 4) with
                (coord_x, coord_y, stride_w, stride_h) when ``with_stride``.
        """
        assert (self.num_levels == len(featmap_sizes))
        multi_level_priors = []
        for i in range(self.num_levels):
            priors = self.single_level_grid_priors(featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride)
            multi_level_priors.append(priors)
        return multi_level_priors

    def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid points of a single level.

        Usually called by :meth:`grid_priors`.

        Args:
            featmap_size (tuple[int]): Feature map size arranged as (h, w).
            level_idx (int): Index of the corresponding feature map level.
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str, optional): Device the tensor is put on.
                Defaults to 'cuda'.
            with_stride (bool): Concatenate the stride to the last
                dimension of points.

        Return:
            Tensor: (N, 2) points (coord_x, coord_y), or (N, 4)
                (coord_x, coord_y, stride_w, stride_h) when ``with_stride``.
        """
        (feat_h, feat_w) = featmap_size
        (stride_w, stride_h) = self.strides[level_idx]
        # Offset (in stride units) moves the point to the sampling location
        # inside each cell (0.5 = cell center).
        shift_x = ((torch.arange(0, feat_w, device=device) + self.offset) * stride_w)
        shift_x = shift_x.to(dtype)
        shift_y = ((torch.arange(0, feat_h, device=device) + self.offset) * stride_h)
        shift_y = shift_y.to(dtype)
        (shift_xx, shift_yy) = self._meshgrid(shift_x, shift_y)
        if (not with_stride):
            shifts = torch.stack([shift_xx, shift_yy], dim=(- 1))
        else:
            # Broadcast the per-level stride pair to every point.
            stride_w = shift_xx.new_full((shift_xx.shape[0],), stride_w).to(dtype)
            stride_h = shift_xx.new_full((shift_yy.shape[0],), stride_h).to(dtype)
            shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=(- 1))
        all_points = shifts.to(device)
        return all_points

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of points of multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): Feature map sizes in multiple
                levels, each size arranged as (h, w).
            pad_shape (tuple(int)): The padded shape of the image, arranged
                as (h, w).
            device (str): The device where the flags will be put on.

        Return:
            list(torch.Tensor): Valid flags of points of multiple levels.
        """
        assert (self.num_levels == len(featmap_sizes))
        multi_level_flags = []
        for i in range(self.num_levels):
            point_stride = self.strides[i]
            (feat_h, feat_w) = featmap_sizes[i]
            (h, w) = pad_shape[:2]
            # strides are stored (w, h): index 1 is the vertical stride.
            valid_feat_h = min(int(np.ceil((h / point_stride[1]))), feat_h)
            valid_feat_w = min(int(np.ceil((w / point_stride[0]))), feat_w)
            flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device)
            multi_level_flags.append(flags)
        return multi_level_flags

    def single_level_valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Generate the valid flags of points of a single feature map.

        Args:
            featmap_size (tuple[int]): Feature map size arranged as (h, w).
            valid_size (tuple[int]): Valid size of the feature map,
                arranged as (h, w).
            device (str, optional): The device where the flags will be put
                on. Defaults to 'cuda'.

        Returns:
            torch.Tensor: Valid flag of each point in the feature map.
        """
        (feat_h, feat_w) = featmap_size
        (valid_h, valid_w) = valid_size
        assert ((valid_h <= feat_h) and (valid_w <= feat_w))
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        (valid_xx, valid_yy) = self._meshgrid(valid_x, valid_y)
        valid = (valid_xx & valid_yy)
        return valid

    def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'):
        """Generate sparse points according to the ``prior_idxs``.

        Args:
            prior_idxs (Tensor): Indices of the requested priors in the
                flattened (row-major) feature map.
            featmap_size (tuple[int]): Feature map size arranged as (h, w).
                NOTE(review): a previous docstring said (w, h), but the
                unpacking below (``height, width = featmap_size`` with
                ``x = prior_idxs % width``) treats it as (h, w).
            level_idx (int): Level index of the corresponding feature map.
            dtype (obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): The device where the points are
                located.

        Returns:
            Tensor: Points with shape (N, 2), N equal to the length of
                ``prior_idxs``; last dimension is (coord_x, coord_y).
        """
        (height, width) = featmap_size
        x = (((prior_idxs % width) + self.offset) * self.strides[level_idx][0])
        y = ((((prior_idxs // width) % height) + self.offset) * self.strides[level_idx][1])
        prioris = torch.stack([x, y], 1).to(dtype)
        prioris = prioris.to(device)
        return prioris
|
class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing
            this assignment
        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1
            means ignore.
        max_overlaps (FloatTensor): the iou between the predicted box and
            its assigned truth box.
        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
                      labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Storage for user-defined properties (see set/get_extra_property).
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment."""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property (key must not clash with info)."""
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object."""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result."""
        parts = [f'num_gts={self.num_gts!r}']
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append(
                f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            num_classes (int): number of possible labels. Default: 3.
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # BUG FIX: previously read ``kwargs.get('p_use_label', 3)`` (a
        # copy-paste of the line above), so ``num_classes`` was ignored.
        num_classes = kwargs.get('num_classes', 3)
        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)
        if num_gts == 0:
            # No gts: everything is unassigned background.
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            max_overlaps = torch.from_numpy(rng.rand(num_preds))
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            # Cap the number of assigned preds at both num_preds and num_gts.
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = torch.from_numpy(
                rng.rand(num_preds) < p_ignore) & is_assigned
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            # NOTE: this assignment is immediately overwritten below; it is
            # kept (together with the shuffle) so the rng stream and the
            # produced values stay identical to the previous behavior.
            gt_inds[is_assigned] = true_idxs[:n_assigned].long()
            gt_inds = torch.from_numpy(
                rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0
            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(
                        rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results (prepended in place).

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        self_inds = torch.arange(
            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])
        self.max_overlaps = torch.cat(
            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
|
class BaseAssigner(metaclass=ABCMeta):
    """Abstract base class for assigners that match boxes to ground truth."""

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign each box to a ground-truth box or mark it as negative."""
|
@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the
    predictions based on the costs. The costs are a weighted sum of three
    components: classification cost, regression L1 cost and regression iou
    cost. The targets don't include the no_object, so generally there are
    more predictions than targets. After the one-to-one matching, the
    un-matched are treated as backgrounds. Thus each query prediction will
    be assigned with `0` or a positive integer indicating the ground truth
    index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict): Config of the classification matching cost.
            Default: ``dict(type='ClassificationCost', weight=1.0)``.
        reg_cost (dict): Config of the regression L1 matching cost.
            Default: ``dict(type='BBoxL1Cost', weight=1.0)``.
        iou_cost (dict): Config of the regression iou matching cost.
            Default: ``dict(type='IoUCost', iou_mode='giou', weight=1.0)``.

    NOTE(review): a previous docstring documented ``cls_weight`` /
    ``bbox_weight`` / ``iou_weight`` arguments that do not exist in the
    signature; the cost configs above are what ``__init__`` actually takes.
    """
    def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)
    def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-07):
        """Computes one-to-one matching based on the weighted costs.

        Steps (order matters):

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), all in range [0, 1]. Shape [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that
                are labelled as `ignored`. Default None.
            eps (int | float, optional): Unused in this implementation;
                kept for interface compatibility. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert (gt_bboxes_ignore is None), 'Only case when gt_bboxes_ignore is None is supported.'
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # Step 1: every prediction starts as -1 (ignore).
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No gt boxes: everything becomes background (0).
            if (num_gts == 0):
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
        # Step 2: weighted cost matrix. Predictions are normalized cxcywh
        # while gts are absolute xyxy, so each cost term converts one side.
        (img_h, img_w, _) = img_meta['img_shape']
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0)
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        normalize_gt_bboxes = (gt_bboxes / factor)
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        bboxes = (bbox_cxcywh_to_xyxy(bbox_pred) * factor)
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        cost = ((cls_cost + reg_cost) + iou_cost)
        # Step 3: Hungarian matching on CPU via scipy.
        cost = cost.detach().cpu()
        if (linear_sum_assignment is None):
            raise ImportError('Please run "pip install scipy" to install scipy first.')
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(bbox_pred.device)
        # Step 4: background for all, then 1-based gt index for matches.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
|
@BBOX_ASSIGNERS.register_module()
class MaskHungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth for
    mask.

    The assignment minimizes a weighted sum of classification cost, mask
    focal cost and mask dice cost via Hungarian matching. There are usually
    more queries than targets; after matching, un-matched queries are
    treated as background. Each query is therefore assigned `0` or a
    positive integer:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config.
        mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config.
        dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config.
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.0),
                 mask_cost=dict(type='FocalLossCost', weight=1.0, binary_input=True),
                 dice_cost=dict(type='DiceCost', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.mask_cost = build_match_cost(mask_cost)
        self.dice_cost = build_match_cost(dice_cost)

    def assign(self, cls_pred, mask_pred, gt_labels, gt_mask, img_meta,
               gt_bboxes_ignore=None, eps=1e-07):
        """Compute one-to-one matching based on the weighted costs.

        Args:
            cls_pred (Tensor): Class prediction in shape
                (num_query, cls_out_channels).
            mask_pred (Tensor): Mask prediction in shape (num_query, H, W).
            gt_labels (Tensor): Label of 'gt_mask'in shape = (num_gt, ).
            gt_mask (Tensor): Ground truth mask in shape = (num_gt, H, W).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that
                are labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator
                for numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gt, num_query = gt_labels.shape[0], cls_pred.shape[0]

        # Every query starts as "ignore" (-1).
        assigned_gt_inds = cls_pred.new_full(
            (num_query,), -1, dtype=torch.long)
        assigned_labels = cls_pred.new_full(
            (num_query,), -1, dtype=torch.long)
        if num_gt == 0 or num_query == 0:
            # With no gts everything becomes background (0).
            if num_gt == 0:
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gt, assigned_gt_inds, None, labels=assigned_labels)

        # Weighted cost matrix; a term is skipped when its weight is 0.
        cls_cost = (self.cls_cost(cls_pred, gt_labels)
                    if self.cls_cost.weight != 0 and cls_pred is not None
                    else 0)
        mask_cost = (self.mask_cost(mask_pred, gt_mask)
                     if self.mask_cost.weight != 0 else 0)
        dice_cost = (self.dice_cost(mask_pred, gt_mask)
                     if self.dice_cost.weight != 0 else 0)
        cost = (cls_cost + mask_cost + dice_cost).detach().cpu()

        # Hungarian matching runs on CPU via scipy.
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" to install scipy first.')
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(cls_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(cls_pred.device)

        # Background for all, then 1-based gt index for matched pairs.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gt, assigned_gt_inds, None, labels=assigned_labels)
|
@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform Matching between the anchors and gt boxes, which can achieve
    balance in positive anchors, and gt_bboxes_ignore was not considered for
    now.

    Args:
        pos_ignore_thr (float): IoU threshold below which a positive
            candidate is ignored instead of assigned.
        neg_ignore_thr (float): IoU threshold above which a negative
            prediction is ignored (-1) rather than kept as background.
        match_times (int): Number of positive anchors for each gt box.
            Default 4.
        iou_calculator (dict): iou_calculator config.
    """
    def __init__(self, pos_ignore_thr, neg_ignore_thr, match_times=4, iou_calculator=dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)
    def assign(self, bbox_pred, anchor, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign each gt to its ``match_times`` closest predictions and
        ``match_times`` closest anchors (L1 distance in cxcywh space).

        Returns an :obj:`AssignResult` carrying extra properties
        ``pos_idx``, ``pos_predicted_boxes`` and ``target_boxes``.
        """
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), 0, dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No gt or no prediction: return an all-background result with
            # empty extra properties.
            if (num_gts == 0):
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property('pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes', bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes', bbox_pred.new_empty((0, 4)))
            return assign_result
        # Pairwise L1 distance in cxcywh space: predictions-vs-gts and
        # anchors-vs-gts.
        cost_bbox = torch.cdist(bbox_xyxy_to_cxcywh(bbox_pred), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        cost_bbox_anchors = torch.cdist(bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        C = cost_bbox.cpu()
        C1 = cost_bbox_anchors.cpu()
        # For each gt (column), take the `match_times` closest predictions
        # and the `match_times` closest anchors as positive candidates.
        index = torch.topk(C, k=self.match_times, dim=0, largest=False)[1]
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        indexes = torch.cat((index, index1), dim=1).reshape((- 1)).to(bbox_pred.device)
        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
        (pred_max_overlaps, _) = pred_overlaps.max(dim=1)
        (anchor_max_overlaps, _) = anchor_overlaps.max(dim=0)
        # Negatives whose prediction overlaps some gt too much are ignored.
        ignore_idx = (pred_max_overlaps > self.neg_ignore_thr)
        assigned_gt_inds[ignore_idx] = (- 1)
        # gt index per candidate, aligned with `indexes` (each topk row
        # contributes gt ids 0..num_gts-1 twice: preds then anchors).
        pos_gt_index = torch.arange(0, C1.size(1), device=bbox_pred.device).repeat((self.match_times * 2))
        pos_ious = anchor_overlaps[(indexes, pos_gt_index)]
        # Candidates with too-low anchor IoU are ignored, not made positive.
        pos_ignore_idx = (pos_ious < self.pos_ignore_thr)
        pos_gt_index_with_ignore = (pos_gt_index + 1)
        pos_gt_index_with_ignore[pos_ignore_idx] = (- 1)
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore
        if (gt_labels is not None):
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), (- 1))
            pos_inds = torch.nonzero((assigned_gt_inds > 0), as_tuple=False).squeeze()
            if (pos_inds.numel() > 0):
                assigned_labels[pos_inds] = gt_labels[(assigned_gt_inds[pos_inds] - 1)]
        else:
            assigned_labels = None
        assign_result = AssignResult(num_gts, assigned_gt_inds, anchor_max_overlaps, labels=assigned_labels)
        assign_result.set_extra_property('pos_idx', (~ pos_ignore_idx))
        assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index])
        return assign_result
|
def build_assigner(cfg, **default_args):
    """Build a box assigner from a registry config."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args=default_args)
|
def build_sampler(cfg, **default_args):
    """Build a box sampler from a registry config."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args=default_args)
|
def build_bbox_coder(cfg, **default_args):
    """Build a bounding-box coder from a registry config."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args=default_args)
|
class BaseBBoxCoder(metaclass=ABCMeta):
    """Abstract base class of bounding-box coders."""

    def __init__(self, **kwargs):
        # Accepts (and ignores) arbitrary kwargs so subclasses can forward
        # their constructor arguments.
        pass

    @abstractmethod
    def encode(self, bboxes, gt_bboxes):
        """Encode deltas between bboxes and ground truth boxes."""

    @abstractmethod
    def decode(self, bboxes, bboxes_pred):
        """Decode the predicted bboxes according to prediction and base
        boxes."""
|
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
    """Distance Point BBox coder.

    This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decode it back to the original.

    Args:
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Defaults to True.
    """

    def __init__(self, clip_border=True):
        # BUG FIX: was ``super(BaseBBoxCoder, self).__init__()``, which
        # looked up past BaseBBoxCoder in the MRO and skipped its __init__.
        super().__init__()
        self.clip_border = clip_border

    def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
        """Encode bounding box to distances.

        Args:
            points (Tensor): Shape (N, 2), The format is [x, y].
            gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
            max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis,
                instead <=. Default 0.1.

        Returns:
            Tensor: Box transformation deltas. The shape is (N, 4).
        """
        assert points.size(0) == gt_bboxes.size(0)
        assert points.size(-1) == 2
        assert gt_bboxes.size(-1) == 4
        return bbox2distance(points, gt_bboxes, max_dis, eps)

    def decode(self, points, pred_bboxes, max_shape=None):
        """Decode distance prediction to bounding box.

        Args:
            points (Tensor): Shape (B, N, 2) or (N, 2).
            pred_bboxes (Tensor): Distance from the given point to 4
                boundaries (left, top, right, bottom). Shape (B, N, 4)
                or (N, 4)
            max_shape (Sequence[int] or torch.Tensor or Sequence[
                Sequence[int]], optional): Maximum bounds for boxes,
                specifies (H, W, C) or (H, W). If priors shape is
                (B, N, 4), then max_shape should be a
                Sequence[Sequence[int]] of length B. Default None.

        Returns:
            Tensor: Boxes with shape (N, 4) or (B, N, 4)
        """
        assert points.size(0) == pred_bboxes.size(0)
        assert points.size(-1) == 2
        assert pred_bboxes.size(-1) == 4
        # clip_border=False disables clipping regardless of max_shape.
        if self.clip_border is False:
            max_shape = None
        return distance2bbox(points, pred_bboxes, max_shape)
|
@BBOX_CODERS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
    """Pseudo bounding box coder that returns its inputs unchanged."""

    def __init__(self, **kwargs):
        # BUG FIX: the original called
        # ``super(BaseBBoxCoder, self).__init__(**kwargs)``, which dispatches
        # past BaseBBoxCoder to ``object.__init__`` and raises TypeError as
        # soon as any keyword argument is supplied. Zero-argument super()
        # correctly reaches BaseBBoxCoder.__init__, which accepts **kwargs.
        super().__init__(**kwargs)

    def encode(self, bboxes, gt_bboxes):
        """torch.Tensor: return the given ``gt_bboxes`` untouched."""
        return gt_bboxes

    def decode(self, bboxes, pred_bboxes):
        """torch.Tensor: return the given ``pred_bboxes`` untouched."""
        return pred_bboxes
|
def build_iou_calculator(cfg, default_args=None):
    """Build an IoU calculator from its config dict.

    Args:
        cfg (dict): Config of the IoU calculator.
        default_args (dict, optional): Default constructor kwargs.

    Returns:
        The constructed IoU calculator instance.
    """
    calculator = build_from_cfg(cfg, IOU_CALCULATORS, default_args)
    return calculator
|
def build_match_cost(cfg, default_args=None):
    # NOTE: the original docstring wrongly said "Builder of IoU calculator";
    # this function builds from the MATCH_COST registry.
    """Builder of matching cost."""
    return build_from_cfg(cfg, MATCH_COST, default_args)
|
@BBOX_SAMPLERS.register_module()
class CombinedSampler(BaseSampler):
    """Sampler that delegates to a positive sampler and a negative sampler.

    Args:
        pos_sampler (dict): Config of the positive sampler.
        neg_sampler (dict): Config of the negative sampler.
    """

    def __init__(self, pos_sampler, neg_sampler, **kwargs):
        super(CombinedSampler, self).__init__(**kwargs)
        # Build two independently configured sub-samplers; all actual
        # sampling happens through them.
        self.pos_sampler = build_sampler(pos_sampler, **kwargs)
        self.neg_sampler = build_sampler(neg_sampler, **kwargs)

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError
|
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
    """Instance balanced sampler that samples equal number of positive samples
    for each instance (ground-truth box)."""

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample positive boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of boxes.
            num_expected (int): The number of expected positive samples.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            # Fewer positives than requested: keep them all.
            return pos_inds
        else:
            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
            num_gts = len(unique_gt_inds)
            # Per-instance quota; +1 so the quotas sum to at least num_expected.
            num_per_gt = int((round((num_expected / float(num_gts))) + 1))
            sampled_inds = []
            for i in unique_gt_inds:
                inds = torch.nonzero((assign_result.gt_inds == i.item()), as_tuple=False)
                if (inds.numel() != 0):
                    inds = inds.squeeze(1)
                else:
                    continue
                if (len(inds) > num_per_gt):
                    # Cap this instance's positives at the per-instance quota.
                    inds = self.random_choice(inds, num_per_gt)
                sampled_inds.append(inds)
            sampled_inds = torch.cat(sampled_inds)
            if (len(sampled_inds) < num_expected):
                # Under-filled after capping: pad with random extras drawn
                # from the not-yet-sampled positive indices.
                num_extra = (num_expected - len(sampled_inds))
                extra_inds = np.array(list((set(pos_inds.cpu()) - set(sampled_inds.cpu()))))
                if (len(extra_inds) > num_extra):
                    extra_inds = self.random_choice(extra_inds, num_extra)
                extra_inds = torch.from_numpy(extra_inds).to(assign_result.gt_inds.device).long()
                sampled_inds = torch.cat([sampled_inds, extra_inds])
            elif (len(sampled_inds) > num_expected):
                # Over-filled (quota rounding): randomly trim to num_expected.
                sampled_inds = self.random_choice(sampled_inds, num_expected)
            return sampled_inds
|
@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
    """IoU Balanced Sampling.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Sampling proposals according to their IoU. `floor_fraction` of needed
    RoIs are sampled from proposals whose IoU are lower than `floor_thr`
    randomly. The others are sampled from proposals whose IoU are higher
    than `floor_thr`. These proposals are sampled from some bins evenly,
    which are split by `num_bins` via IoU evenly.

    Args:
        num (int): number of proposals.
        pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced
            sampling, set to -1 if all using IoU balanced sampling.
        floor_fraction (float): sampling fraction of proposals under
            `floor_thr`.
        num_bins (int): number of bins in IoU balanced sampling.
    """

    def __init__(self, num, pos_fraction, floor_thr=-1, floor_fraction=0, num_bins=3, **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs)
        # floor_thr must be a valid IoU (>= 0) or the sentinel -1.
        assert floor_thr >= 0 or floor_thr == -1
        assert 0 <= floor_fraction <= 1
        assert num_bins >= 1
        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample indices evenly from ``num_bins`` IoU intervals.

        Args:
            max_overlaps (np.ndarray): IoU between bounding boxes and
                ground truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.

        Returns:
            np.ndarray: Indices of samples.
        """
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)
        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                # BUG FIX: ``np.int`` (a deprecated alias of the builtin
                # ``int``) was removed in NumPy 1.24; use ``int`` directly.
                tmp_sampled_set = np.array(tmp_inds, dtype=int)
            sampled_inds.append(tmp_sampled_set)
        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            # Bins under-filled: top up with random extras from the rest.
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])
        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of boxes.
            num_expected (int): The number of expected negative samples.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            # Balance-sampling is performed only on negative proposals.
            neg_set = set(neg_inds.cpu().numpy())
            if self.floor_thr > 0:
                floor_set = set(
                    np.where(
                        np.logical_and(max_overlaps >= 0,
                                       max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(
                    np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
                # Kept from the original implementation: a negative
                # floor_thr is reset to 0 so sample_via_interval bins
                # start from IoU 0 on subsequent calls.
                self.floor_thr = 0
            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected *
                                            (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(
                        max_overlaps, set(iou_sampling_neg_inds),
                        num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(
                        iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                # BUG FIX: dtype=np.int -> dtype=int (removed in NumPy 1.24).
                iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=int)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(
                    floor_neg_inds, num_expected_floor)
            else:
                # BUG FIX: dtype=np.int -> dtype=int (removed in NumPy 1.24).
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
            sampled_inds = np.concatenate(
                (sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                # Still short: pad with random extras from remaining negatives.
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(
                assign_result.gt_inds.device)
            return sampled_inds
|
@BBOX_SAMPLERS.register_module()
class MaskPseudoSampler(BaseSampler):
    """A pseudo sampler for masks that performs no actual sampling."""

    def __init__(self, **kwargs):
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError

    def sample(self, assign_result, masks, gt_masks, **kwargs):
        """Directly return the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results.
            masks (torch.Tensor): Predicted masks.
            gt_masks (torch.Tensor): Ground truth masks.

        Returns:
            :obj:`MaskSamplingResult`: sampler results.
        """
        gt_inds = assign_result.gt_inds
        pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        # Nothing was added as a gt proposal, so every flag stays zero.
        gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8)
        return MaskSamplingResult(pos_inds, neg_inds, masks, gt_masks,
                                  assign_result, gt_flags)
|
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
    """Online Hard Example Mining Sampler described in `Training Region-based
    Object Detectors with Online Hard Example Mining
    <https://arxiv.org/abs/1604.03540>`_.
    """

    def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1), add_gt_as_proposals=True, loss_key='loss_cls', **kwargs):
        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
        # `context` is the owning head; it provides the bbox head(s) and a
        # `_bbox_forward` used to score candidates for hard mining.
        self.context = context
        if (not hasattr(self.context, 'num_stages')):
            self.bbox_head = self.context.bbox_head
        else:
            # Cascade-style context: use the bbox head of the current stage.
            self.bbox_head = self.context.bbox_head[self.context.current_stage]
        # Which entry of the loss dict ranks the hardness of each sample.
        self.loss_key = loss_key

    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        """Select the `num_expected` candidates with the highest loss.

        Runs a forward pass (no grad) on the candidate boxes and keeps the
        top-k by the classification loss named by `self.loss_key`.
        """
        with torch.no_grad():
            rois = bbox2roi([bboxes])
            if (not hasattr(self.context, 'num_stages')):
                bbox_results = self.context._bbox_forward(feats, rois)
            else:
                bbox_results = self.context._bbox_forward(self.context.current_stage, feats, rois)
            cls_score = bbox_results['cls_score']
            # Per-sample (unreduced) loss; bbox regression terms disabled.
            loss = self.bbox_head.loss(cls_score=cls_score, bbox_pred=None, rois=rois, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')[self.loss_key]
            (_, topk_loss_inds) = loss.topk(num_expected)
        return inds[topk_loss_inds]

    def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
        """Sample positive boxes.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results.
            num_expected (int): Number of expected positive samples.
            bboxes (torch.Tensor, optional): Boxes. Defaults to None.
            feats (list[torch.Tensor], optional): Multi-level features.
                Defaults to None.

        Returns:
            torch.Tensor: Indices of positive samples.
        """
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            return pos_inds
        else:
            # More positives than needed: keep the hardest ones.
            return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats)

    def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results.
            num_expected (int): Number of expected negative samples.
            bboxes (torch.Tensor, optional): Boxes. Defaults to None.
            feats (list[torch.Tensor], optional): Multi-level features.
                Defaults to None.

        Returns:
            torch.Tensor: Indices of negative samples.
        """
        neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False)
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            # Negatives are scored against the background class
            # (index == num_classes by mmdet convention).
            neg_labels = assign_result.labels.new_empty(neg_inds.size(0)).fill_(self.bbox_head.num_classes)
            return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], neg_labels, feats)
|
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
    """A pseudo sampler that keeps all assigned boxes without sampling."""

    def __init__(self, **kwargs):
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):
        """Directly return the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results.
            bboxes (torch.Tensor): Bounding boxes.
            gt_bboxes (torch.Tensor): Ground truth boxes.

        Returns:
            :obj:`SamplingResult`: sampler results.
        """
        gt_inds = assign_result.gt_inds
        pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        # Nothing was added as a gt proposal, so every flag stays zero.
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags)
|
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
    """Random sampler.

    Args:
        num (int): Number of samples.
        pos_fraction (float): Fraction of positive samples.
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1.
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Randomly select ``num`` elements from ``gallery``.

        If `gallery` is a Tensor, the returned indices are a Tensor;
        if it is an ndarray or list, they are an ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num
        was_tensor = isinstance(gallery, torch.Tensor)
        if not was_tensor:
            # Promote to a long tensor so fancy indexing below works.
            device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
        selected = gallery[perm]
        # Hand back the same container family the caller passed in.
        return selected if was_tensor else selected.cpu().numpy()

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        return self.random_choice(neg_inds, num_expected)
|
class GeneralData(NiceRepr):
    """A general data structure of OpenMMLab.

    Stores two groups of attributes:

    - ``meta_info_fields``: image-level meta information (e.g. filename,
      ``img_shape``, ``pad_shape``). Immutable once set; extend with
      :meth:`set_meta_info`; read via :meth:`meta_info_keys`,
      :meth:`meta_info_values`, :meth:`meta_info_items`.
    - ``data_fields``: annotations or model predictions. Accessed and
      modified with dict-like or object-like operations (``.``, ``[]``,
      ``in``, ``del``, ``pop(str)``, ``get(str)``, ``keys()``,
      ``values()``, ``items()``). Tensor-like helpers (:meth:`to`,
      :meth:`cpu`, :meth:`cuda`, :meth:`detach`, :meth:`numpy`) are
      applied to every ``torch.Tensor`` value.

    Args:
        meta_info (dict, optional): Meta information of a single image,
            such as ``img_shape``, ``scale_factor``. Default: None.
        data (dict, optional): Annotations of a single image or model
            predictions. Default: None.
    """

    def __init__(self, meta_info=None, data=None):
        # Registries of field names; the actual values live in __dict__.
        self._meta_info_fields = set()
        self._data_fields = set()
        if (meta_info is not None):
            self.set_meta_info(meta_info=meta_info)
        if (data is not None):
            self.set_data(data)

    def set_meta_info(self, meta_info):
        """Add meta information.

        Args:
            meta_info (dict): A dict containing the meta information of the
                image, e.g. ``img_shape``, ``scale_factor``.

        Raises:
            KeyError: If a key already exists with a different value
                (meta info is immutable once set).
        """
        assert isinstance(meta_info, dict), f'meta should be a `dict` but get {meta_info}'
        # Deep-copy so later mutation of the caller's dict cannot leak in.
        meta = copy.deepcopy(meta_info)
        for (k, v) in meta.items():
            if (k in self._meta_info_fields):
                ori_value = getattr(self, k)
                if isinstance(ori_value, (torch.Tensor, np.ndarray)):
                    # Array-like values: setting the identical value is a
                    # no-op, anything else violates immutability.
                    if (ori_value == v).all():
                        continue
                    else:
                        raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
                elif (ori_value == v):
                    continue
                else:
                    raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
            else:
                self._meta_info_fields.add(k)
                # Bypass __setattr__ so the key is not registered as data.
                self.__dict__[k] = v

    def set_data(self, data):
        """Update a dict to `data_fields`.

        Args:
            data (dict): A dict containing annotations of the image or
                model predictions.
        """
        assert isinstance(data, dict), f'meta should be a `dict` but get {data}'
        for (k, v) in data.items():
            # Goes through __setattr__ so each key is tracked in _data_fields.
            self.__setattr__(k, v)

    def new(self, meta_info=None, data=None):
        """Return a new instance carrying over this one's meta information.

        Args:
            meta_info (dict, optional): Extra meta information to merge in.
                Default: None.
            data (dict, optional): Data fields for the new instance.
                Default: None.
        """
        new_data = self.__class__()
        new_data.set_meta_info(dict(self.meta_info_items()))
        if (meta_info is not None):
            new_data.set_meta_info(meta_info)
        if (data is not None):
            new_data.set_data(data)
        return new_data

    def keys(self):
        """
        Returns:
            list: Contains all keys in data_fields.
        """
        return [key for key in self._data_fields]

    def meta_info_keys(self):
        """
        Returns:
            list: Contains all keys in meta_info_fields.
        """
        return [key for key in self._meta_info_fields]

    def values(self):
        """
        Returns:
            list: Contains all values in data_fields.
        """
        return [getattr(self, k) for k in self.keys()]

    def meta_info_values(self):
        """
        Returns:
            list: Contains all values in meta_info_fields.
        """
        return [getattr(self, k) for k in self.meta_info_keys()]

    def items(self):
        """Yield ``(key, value)`` pairs of the data fields."""
        for k in self.keys():
            (yield (k, getattr(self, k)))

    def meta_info_items(self):
        """Yield ``(key, value)`` pairs of the meta info fields."""
        for k in self.meta_info_keys():
            (yield (k, getattr(self, k)))

    def __setattr__(self, name, val):
        # The two registries themselves may only be created once.
        if (name in ('_meta_info_fields', '_data_fields')):
            if (not hasattr(self, name)):
                super().__setattr__(name, val)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            if (name in self._meta_info_fields):
                raise AttributeError(f'`{name}` is used in meta information,which is immutable')
            # Every other attribute assignment registers a data field.
            self._data_fields.add(name)
            super().__setattr__(name, val)

    def __delattr__(self, item):
        if (item in ('_meta_info_fields', '_data_fields')):
            raise AttributeError(f'{item} has been used as a private attribute, which is immutable. ')
        if (item in self._meta_info_fields):
            raise KeyError(f'{item} is used in meta information, which is immutable.')
        super().__delattr__(item)
        if (item in self._data_fields):
            self._data_fields.remove(item)

    # Dict-style access reuses the attribute protocol.
    __setitem__ = __setattr__
    __delitem__ = __delattr__

    def __getitem__(self, name):
        return getattr(self, name)

    def get(self, *args):
        """dict-like ``get(key[, default])`` over all attributes."""
        assert (len(args) < 3), '`get` get more than 2 arguments'
        return self.__dict__.get(*args)

    def pop(self, *args):
        """dict-like ``pop(key[, default])``; meta info cannot be popped."""
        assert (len(args) < 3), '`pop` get more than 2 arguments'
        name = args[0]
        if (name in self._meta_info_fields):
            raise KeyError(f'{name} is a key in meta information, which is immutable')
        if (args[0] in self._data_fields):
            self._data_fields.remove(args[0])
            return self.__dict__.pop(*args)
        elif (len(args) == 2):
            # Key absent but a default was supplied.
            return args[1]
        else:
            raise KeyError(f'{args[0]}')

    def __contains__(self, item):
        return ((item in self._data_fields) or (item in self._meta_info_fields))

    def to(self, *args, **kwargs):
        """Apply :meth:`torch.Tensor.to` to all values that support it."""
        new_data = self.new()
        for (k, v) in self.items():
            if hasattr(v, 'to'):
                v = v.to(*args, **kwargs)
            new_data[k] = v
        return new_data

    def cpu(self):
        """Move all tensors in data_fields to CPU."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu()
            new_data[k] = v
        return new_data

    def cuda(self):
        """Move all tensors in data_fields to the current CUDA device."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cuda()
            new_data[k] = v
        return new_data

    def detach(self):
        """Detach all tensors in data_fields from the autograd graph."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach()
            new_data[k] = v
        return new_data

    def numpy(self):
        """Convert all tensors in data_fields to numpy arrays."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach().cpu().numpy()
            new_data[k] = v
        return new_data

    def __nice__(self):
        """Build the body of the repr (consumed by NiceRepr)."""
        repr = '\n \n META INFORMATION \n'
        for (k, v) in self.meta_info_items():
            # NOTE(review): the closing quotes of these triple-quoted
            # f-strings sit at column 0 in the flattened original, so the
            # string content is exactly '{k}: {v}\n'; kept byte-identical.
            repr += f'''{k}: {v}
'''
        repr += '\n DATA FIELDS \n'
        for (k, v) in self.items():
            if isinstance(v, (torch.Tensor, np.ndarray)):
                repr += f'''shape of {k}: {v.shape}
'''
            else:
                repr += f'''{k}: {v}
'''
        return (repr + '\n')
|
class InstanceData(GeneralData):
    """Data structure for instance-level annotations or predictions.

    Subclass of :class:`GeneralData`. Every value in ``data_fields`` must
    have the same length (the number of instances); ``len(self)`` is that
    shared length. This design refers to detectron2's ``Instances``:
    https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py

    Supports indexing by ``str`` (field lookup), ``int``, ``slice``,
    ``torch.LongTensor`` and ``torch.BoolTensor`` (instance selection),
    and concatenation of several instances via :meth:`cat`.
    """

    def __setattr__(self, name, value):
        if (name in ('_meta_info_fields', '_data_fields')):
            if (not hasattr(self, name)):
                super().__setattr__(name, value)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            assert isinstance(value, (torch.Tensor, np.ndarray, list)), f'Can set {type(value)}, only support {(torch.Tensor, np.ndarray, list)}'
            if self._data_fields:
                # Enforce the shared first-dimension length across fields.
                assert (len(value) == len(self)), f'the length of values {len(value)} is not consistent with the length of this :obj:`InstanceData` {len(self)} '
            super().__setattr__(name, value)

    def __getitem__(self, item):
        """
        Args:
            item (str, obj:`slice`,
                obj`torch.LongTensor`, obj:`torch.BoolTensor`):
                get the corresponding values according to item.

        Returns:
            obj:`InstanceData`: Corresponding values.
        """
        assert len(self), ' This is a empty instance'
        assert isinstance(item, (str, slice, int, torch.LongTensor, torch.BoolTensor))
        if isinstance(item, str):
            # String key: plain field lookup, no new container.
            return getattr(self, item)
        if (type(item) == int):
            if ((item >= len(self)) or (item < (- len(self)))):
                raise IndexError(f'Index {item} out of range!')
            else:
                # Turn the int into a length-1 slice so the result stays
                # an InstanceData rather than a bare element.
                item = slice(item, None, len(self))
        new_data = self.new()
        if isinstance(item, torch.Tensor):
            assert (item.dim() == 1), 'Only support to get the values along the first dimension.'
            if isinstance(item, torch.BoolTensor):
                assert (len(item) == len(self)), f'The shape of the input(BoolTensor)) {len(item)} does not match the shape of the indexed tensor in results_filed {len(self)} at first dimension. '
            for (k, v) in self.items():
                if isinstance(v, torch.Tensor):
                    new_data[k] = v[item]
                elif isinstance(v, np.ndarray):
                    # ndarray cannot take a torch index directly.
                    new_data[k] = v[item.cpu().numpy()]
                elif isinstance(v, list):
                    # Lists are gathered element by element; a BoolTensor is
                    # first converted to the positions of its True entries.
                    r_list = []
                    if isinstance(item, torch.BoolTensor):
                        indexes = torch.nonzero(item).view((- 1))
                    else:
                        indexes = item
                    for index in indexes:
                        r_list.append(v[index])
                    new_data[k] = r_list
        else:
            # slice (including the int-converted case): native indexing
            # works for tensors, ndarrays and lists alike.
            for (k, v) in self.items():
                new_data[k] = v[item]
        return new_data

    @staticmethod
    def cat(instances_list):
        """Concat the predictions of all :obj:`InstanceData` in the list.

        Args:
            instances_list (list[:obj:`InstanceData`]): A list
                of :obj:`InstanceData`.

        Returns:
            obj:`InstanceData`
        """
        assert all((isinstance(results, InstanceData) for results in instances_list))
        assert (len(instances_list) > 0)
        if (len(instances_list) == 1):
            return instances_list[0]
        new_data = instances_list[0].new()
        for k in instances_list[0]._data_fields:
            values = [results[k] for results in instances_list]
            v0 = values[0]
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, np.ndarray):
                values = np.concatenate(values, axis=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            else:
                raise ValueError(f'Can not concat the {k} which is a {type(v0)}')
            new_data[k] = values
        return new_data

    def __len__(self):
        # All fields share the same length, so any field's length suffices.
        if len(self._data_fields):
            for v in self.values():
                return len(v)
        else:
            raise AssertionError('This is an empty `InstanceData`.')
|
def wider_face_classes():
    """Return the class names of the WIDER FACE dataset (one class)."""
    names = ['face']
    return names
|
def voc_classes():
    """Return the 20 class names of the PASCAL VOC dataset."""
    names = (
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
        'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
        'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    return list(names)
|
def imagenet_det_classes():
    """Return the 200 class names of the ImageNet DET dataset."""
    return ['accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra']
|
def imagenet_vid_classes():
    """Return the 30 class names of the ImageNet VID dataset."""
    names = (
        'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
        'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
        'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle',
        'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',
        'train', 'turtle', 'watercraft', 'whale', 'zebra')
    return list(names)
|
def coco_classes():
    """Return the 80 class names of the COCO dataset."""
    return ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
|
def cityscapes_classes():
    """Return the class names of the Cityscapes dataset, in label order."""
    return 'person rider car truck bus train motorcycle bicycle'.split()
|
def oid_challenge_classes():
    """Return the class names of the Open Images Challenge dataset.

    The list order defines the label indices, so it must not be changed.
    """
    return ['Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', 'Vehicle registration plate', 'Microphone', 'Musical keyboard', 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', 'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf', 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', 'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter', 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', 'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll', 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', 'Tablet computer', 'Pillow', 'Kitchen & dining room table', 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', 'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber', 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon']
|
def oid_v6_classes():
    """Return the class names of the Open Images v6 dataset.

    The list order defines the label indices, so it must not be changed.
    """
    return ['Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', 'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', 'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor', 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', 'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero', 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', 'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat', 'Baseball bat', 'Baseball glove', 'Mixing bowl', 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', 'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard', 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', 'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device', 'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard', 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', 'Trousers', 'Bowling equipment', 'Football helmet', 'Truck', 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', 'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet', 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', 'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', 'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', 'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone', 'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray', 'Kitchen & dining room table', 'Dog bed', 'Cake stand', 'Cat furniture', 'Bathroom accessory', 'Facial tissue holder', 'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler', 'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry', 'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily', 'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant', 'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon', 'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', 'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod', 'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume', 'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair', 'Rugby ball', 'Armadillo', 'Maracas', 'Helmet']
|
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Dataset name or one of its aliases, as registered
            in the module-level ``dataset_aliases`` mapping.

    Returns:
        list[str]: Class names of the dataset.

    Raises:
        ValueError: If ``dataset`` is not a recognized name or alias.
        TypeError: If ``dataset`` is not a string.
    """
    # Invert dataset_aliases: every alias points back to its canonical name.
    alias2name = {}
    for (name, aliases) in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if (dataset in alias2name):
            # Look the loader function up in the module namespace instead of
            # eval()-ing a constructed string, which would execute arbitrary
            # code if dataset_aliases were ever tampered with.
            labels = globals()[alias2name[dataset] + '_classes']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
|
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
    """Split (milestone, interval) pairs into two parallel lists.

    Args:
        start_interval (int): Interval used before the first milestone.
        dynamic_interval_list (list[tuple]): Pairs of
            (start_milestone, interval_after_that_milestone).

    Returns:
        tuple(list, list): (milestones prefixed with 0,
            intervals prefixed with ``start_interval``).
    """
    assert mmcv.is_list_of(dynamic_interval_list, tuple)
    milestones = [0]
    intervals = [start_interval]
    for pair in dynamic_interval_list:
        milestones.append(pair[0])
        intervals.append(pair[1])
    return milestones, intervals
|
class EvalHook(BaseEvalHook):
    """Single-GPU evaluation hook with optional dynamic evaluation intervals.

    Args:
        dynamic_intervals (list[tuple], optional): Pairs of
            (milestone, interval); the evaluation interval switches to the
            paired value once training progress reaches the milestone.
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(EvalHook, self).__init__(*args, **kwargs)
        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        """Refresh ``self.interval`` from the dynamic schedule, if enabled."""
        if not self.use_dynamic_intervals:
            return
        progress = runner.epoch if self.by_epoch else runner.iter
        # Index of the last milestone not exceeding the upcoming step.
        step = bisect.bisect(self.dynamic_milestones, progress + 1)
        self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        if not self._should_evaluate(runner):
            return
        from mmdet.apis import single_gpu_test
        eval_results = single_gpu_test(runner.model, self.dataloader, show=False)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, eval_results)
        if self.save_best and key_score:
            self._save_ckpt(runner, key_score)
|
class DistEvalHook(BaseDistEvalHook):
    """Distributed evaluation hook with optional dynamic evaluation intervals.

    Args:
        dynamic_intervals (list[tuple], optional): Pairs of
            (milestone, interval); the evaluation interval switches to the
            paired value once training progress reaches the milestone.
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(DistEvalHook, self).__init__(*args, **kwargs)
        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        """Refresh ``self.interval`` from the dynamic schedule, if enabled."""
        if not self.use_dynamic_intervals:
            return
        progress = runner.epoch if self.by_epoch else runner.iter
        # Index of the last milestone not exceeding the upcoming step.
        step = bisect.bisect(self.dynamic_milestones, progress + 1)
        self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        if self.broadcast_bn_buffer:
            # Sync BN statistics from rank 0 so every rank evaluates the
            # same model.
            model = runner.model
            for _, module in model.named_modules():
                if isinstance(module, _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        if not self._should_evaluate(runner):
            return
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        from mmdet.apis import multi_gpu_test
        eval_results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, eval_results)
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
|
def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=None):
    """Prepare sample input and wrap model for ONNX export.

    The ONNX export API only accept args, and all inputs should be
    torch.Tensor or corresponding types (such as tuple of tensor).
    So we should call this function before exporting. This function will:

    1. generate corresponding inputs which are used to execute the model.
    2. Wrap the model's forward function.

    For example, the MMDet models' forward function has a parameter
    ``return_loss:bool``. As we want to set it as False while export API
    supports neither bool type or kwargs. So we have to replace the forward
    method like ``model.forward = partial(model.forward, return_loss=False)``.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint
        input_config (dict): the exactly data in this dict depends on the
            framework. For MMSeg, we can just declare the input shape,
            and generate the dummy data accordingly. However, for MMDet,
            we may pass the real img path, or the NMS will return None
            as there is no legal bbox.
        cfg_options (dict, optional): Options that override some settings in
            the loaded config. Defaults to None.

    Returns:
        tuple: (model, tensor_data) wrapped model which can be called by
            ``model(*tensor_data)`` and a list of inputs which are used to
            execute the model while exporting.
    """
    model = build_model_from_cfg(config_path, checkpoint_path, cfg_options=cfg_options)
    one_img, one_meta = preprocess_example_input(input_config)
    tensor_data = [one_img]
    # ONNX export cannot pass kwargs, so bake the non-tensor arguments into
    # the forward method itself.
    model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)
    opset_version = 11
    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError as e:
        # Chain the original ImportError so the missing-module context is
        # preserved in the traceback.
        raise NotImplementedError('please update mmcv to version>=v1.0.4') from e
    register_extra_symbolics(opset_version)
    return model, tensor_data
|
def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
    """Build a model from config and load the given checkpoint.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint
        cfg_options (dict, optional): Options overriding settings in the
            loaded config. Defaults to None.

    Returns:
        torch.nn.Module: the built model
    """
    from mmdet.models import build_detector

    cfg = mmcv.Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Build the detector in test mode, without downloading pretrained
    # backbone weights and without a train config.
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu')
    meta = checkpoint.get('meta', {})
    if 'CLASSES' in meta:
        model.CLASSES = meta['CLASSES']
    else:
        # Checkpoint carries no class names: fall back to the registered
        # test dataset's CLASSES.
        from mmdet.datasets import DATASETS
        dataset = DATASETS.get(cfg.data.test['type'])
        assert dataset is not None
        model.CLASSES = dataset.CLASSES
    model.cpu().eval()
    return model
|
def preprocess_example_input(input_config):
    """Prepare an example input image for ``generate_inputs_and_wrap_model``.

    Args:
        input_config (dict): customized config describing the example input.
            Expected keys: ``input_path``, ``input_shape`` and optionally
            ``normalize_cfg`` (with ``mean``, ``std`` and optional
            ``to_rgb``).

    Returns:
        tuple: (one_img, one_meta), tensor of the example input image and
            meta information for the example input image.

    Examples:
        >>> from mmdet.core.export import preprocess_example_input
        >>> input_config = {
        >>>     'input_shape': (1,3,224,224),
        >>>     'input_path': 'demo/demo.jpg',
        >>>     'normalize_cfg': {
        >>>         'mean': (123.675, 116.28, 103.53),
        >>>         'std': (58.395, 57.12, 57.375)
        >>>     }
        >>> }
        >>> one_img, one_meta = preprocess_example_input(input_config)
        >>> print(one_img.shape)
        torch.Size([1, 3, 224, 224])
    """
    img_path = input_config['input_path']
    shape = input_config['input_shape']
    img = mmcv.imread(img_path)
    # mmcv.imresize expects (w, h); input_shape is (N, C, H, W).
    img = mmcv.imresize(img, shape[2:][::-1])
    show_img = img.copy()
    if 'normalize_cfg' in input_config:
        normalize_cfg = input_config['normalize_cfg']
        mean = np.array(normalize_cfg['mean'], dtype=np.float32)
        std = np.array(normalize_cfg['std'], dtype=np.float32)
        to_rgb = normalize_cfg.get('to_rgb', True)
        img = mmcv.imnormalize(img, mean, std, to_rgb=to_rgb)
    # HWC -> CHW, prepend a batch dim and mark as requiring grad.
    one_img = torch.from_numpy(img.transpose(2, 0, 1))
    one_img = one_img.unsqueeze(0).float().requires_grad_(True)
    _, C, H, W = shape
    one_meta = {
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.ones(4, dtype=np.float32),
        'flip': False,
        'show_img': show_img,
        'flip_direction': None
    }
    return one_img, one_meta
|
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
    """Check invalid loss hook.

    This hook will regularly check whether the loss is valid
    during training.

    Args:
        interval (int): Checking interval (every k iterations).
            Default: 50.
    """

    def __init__(self, interval=50):
        self.interval = interval

    def after_train_iter(self, runner):
        """Check ``runner.outputs['loss']`` every ``self.interval`` iters.

        Raises:
            AssertionError: If the loss is NaN or infinite.
        """
        if self.every_n_iters(runner, self.interval):
            # The original ``assert cond, logger.info(...)`` raised an
            # AssertionError whose message was None (logger.info returns
            # None) and was stripped entirely under ``python -O``.
            # Log and raise explicitly instead.
            if not torch.isfinite(runner.outputs['loss']):
                runner.logger.info('loss become infinite or NaN!')
                raise AssertionError('loss become infinite or NaN!')
|
class BaseEMAHook(Hook):
    """Exponential Moving Average Hook.

    Use Exponential Moving Average on all parameters of model in training
    process. All parameters have a ema backup, which update by the formula
    as below. EMAHook takes priority over EvalHook and CheckpointHook. Note,
    the original model parameters are actually saved in ema field after train.

    Args:
        momentum (float): The momentum used for updating ema parameter.
            Ema's parameter are updated with the formula:
            `ema_param = (1-momentum) * ema_param + momentum * cur_param`.
            Defaults to 0.0002.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var), it does not
            perform the ema operation. Default to False.
        interval (int): Update ema parameter every interval iteration.
            Defaults to 1.
        resume_from (str, optional): The checkpoint path. Defaults to None.
        momentum_fun (func, optional): The function to change momentum
            during early iteration (also warmup) to help early training.
            It uses `momentum` as a constant. Defaults to None.
    """
    def __init__(self, momentum=0.0002, interval=1, skip_buffers=False, resume_from=None, momentum_fun=None):
        assert (0 < momentum < 1)
        self.momentum = momentum
        self.skip_buffers = skip_buffers
        self.interval = interval
        # Path of the checkpoint to resume from (restores model and EMA
        # buffers together), resumed in before_run.
        self.checkpoint = resume_from
        self.momentum_fun = momentum_fun
    def before_run(self, runner):
        """To resume model with it's ema parameters more friendly.

        Register ema parameter as ``named_buffer`` to model.
        """
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        # Maps original parameter name -> name of its EMA buffer.
        self.param_ema_buffer = {}
        if self.skip_buffers:
            # Only learnable parameters get an EMA copy.
            self.model_parameters = dict(model.named_parameters())
        else:
            # Parameters AND buffers (e.g. BN running stats) get EMA copies.
            self.model_parameters = model.state_dict()
        for (name, value) in self.model_parameters.items():
            # '.' is not allowed in a registered buffer name, so flatten it.
            buffer_name = f"ema_{name.replace('.', '_')}"
            self.param_ema_buffer[name] = buffer_name
            model.register_buffer(buffer_name, value.data.clone())
        self.model_buffers = dict(model.named_buffers())
        if (self.checkpoint is not None):
            runner.resume(self.checkpoint)
    def get_momentum(self, runner):
        # Momentum may be scheduled per-iteration via ``momentum_fun``.
        return (self.momentum_fun(runner.iter) if self.momentum_fun else self.momentum)
    def after_train_iter(self, runner):
        """Update ema parameter every self.interval iterations."""
        if (((runner.iter + 1) % self.interval) != 0):
            return
        momentum = self.get_momentum(runner)
        for (name, parameter) in self.model_parameters.items():
            # Only floating-point tensors are averaged; integer tensors
            # (e.g. step counters) are left untouched.
            if parameter.dtype.is_floating_point:
                buffer_name = self.param_ema_buffer[name]
                buffer_parameter = self.model_buffers[buffer_name]
                # ema = (1 - momentum) * ema + momentum * param (in-place).
                buffer_parameter.mul_((1 - momentum)).add_(parameter.data, alpha=momentum)
    def after_train_epoch(self, runner):
        """We load parameter values from ema backup to model before the
        EvalHook."""
        self._swap_ema_parameters()
    def before_train_epoch(self, runner):
        """We recover model's parameter from ema backup after last epoch's
        EvalHook."""
        self._swap_ema_parameters()
    def _swap_ema_parameters(self):
        """Swap the parameter of model with parameter in ema_buffer."""
        for (name, value) in self.model_parameters.items():
            temp = value.data.clone()
            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
            value.data.copy_(ema_buffer.data)
            ema_buffer.data.copy_(temp)
|
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
    """EMAHook using exponential momentum strategy.

    Args:
        total_iter (int): The total number of iterations of EMA momentum.
            Defaults to 2000.
    """

    def __init__(self, total_iter=2000, **kwargs):
        super(ExpMomentumEMAHook, self).__init__(**kwargs)

        def _exp_momentum(cur_iter):
            # Decays from ~1 toward ``self.momentum`` as iterations grow.
            decay = math.exp(-(1 + cur_iter) / total_iter)
            return (1 - self.momentum) * decay + self.momentum

        self.momentum_fun = _exp_momentum
|
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
    """EMAHook using linear momentum strategy.

    Args:
        warm_up (int): During first warm_up steps, we may use smaller decay
            to update ema parameters more slowly. Defaults to 100.
    """

    def __init__(self, warm_up=100, **kwargs):
        super(LinearMomentumEMAHook, self).__init__(**kwargs)

        def _linear_momentum(cur_iter):
            # Ramps up during warmup, capped at momentum ** interval.
            cap = self.momentum ** self.interval
            return min(cap, (1 + cur_iter) / (warm_up + cur_iter))

        self.momentum_fun = _linear_momentum
|
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
    """Set runner's epoch information to the model."""

    def before_train_epoch(self, runner):
        # Unwrap DataParallel/DistributedDataParallel-style wrappers before
        # forwarding the epoch to the model.
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        model.set_epoch(runner.epoch)
|
def get_norm_states(module):
    """Collect the state-dict entries of every norm layer inside ``module``.

    Args:
        module (nn.Module): Module to scan recursively.

    Returns:
        OrderedDict: Mapping from dotted state path (e.g.
            ``'bn.running_mean'``) to its tensor value.
    """
    norm_states = OrderedDict()
    for name, child in module.named_modules():
        if not isinstance(child, nn.modules.batchnorm._NormBase):
            continue
        for key, value in child.state_dict().items():
            norm_states[f'{name}.{key}'] = value
    return norm_states
|
@HOOKS.register_module()
class SyncNormHook(Hook):
    """Synchronize Norm states after training epoch, currently used in YOLOX.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to switch to synchronizing norm interval. Default: 15.
        interval (int): Synchronizing norm interval. Default: 1.
    """

    def __init__(self, num_last_epochs=15, interval=1):
        self.interval = interval
        self.num_last_epochs = num_last_epochs

    def before_train_epoch(self, runner):
        # Switch to per-epoch syncing once the final ``num_last_epochs``
        # phase begins.
        if (runner.epoch + 1) == runner.max_epochs - self.num_last_epochs:
            self.interval = 1

    def after_train_epoch(self, runner):
        """Synchronizing norm."""
        if (runner.epoch + 1) % self.interval != 0:
            return
        _, world_size = get_dist_info()
        if world_size == 1:
            # Nothing to synchronize in single-process training.
            return
        norm_states = get_norm_states(runner.model)
        if len(norm_states) == 0:
            return
        # Average the norm statistics across all ranks.
        norm_states = all_reduce_dict(norm_states, op='mean')
        runner.model.load_state_dict(norm_states, strict=False)
|
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
    """Change and synchronize the random image size across ranks.

    SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve
    similar functions. Such as `dict(type='Resize', img_scale=[(448, 448),
    (832, 832)], multiscale_mode='range', keep_ratio=True)`.

    Args:
        ratio_range (tuple[int]): Random ratio range. It will be multiplied
            by 32, and then change the dataset output image size.
            Default: (14, 26).
        img_scale (tuple[int]): Size of input image. Default: (640, 640).
        interval (int): The epoch interval of change image size. Default: 1.
        device (torch.device | str): device for returned tensors.
            Default: 'cuda'.
    """

    def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'):
        warnings.warn("DeprecationWarning: SyncRandomSizeHook is deprecated. Please use Resize pipeline to achieve similar functions. Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval.")
        self.rank, world_size = get_dist_info()
        self.is_distributed = world_size > 1
        self.ratio_range = ratio_range
        self.img_scale = img_scale
        self.interval = interval
        self.device = device

    def after_train_epoch(self, runner):
        """Change the dataset output image size."""
        if self.ratio_range is None or (runner.epoch + 1) % self.interval != 0:
            return
        # Rank 0 samples the new size; other ranks receive it via broadcast.
        tensor = torch.LongTensor(2).to(self.device)
        if self.rank == 0:
            size_factor = self.img_scale[1] * 1.0 / self.img_scale[0]
            size = random.randint(*self.ratio_range)
            # Scale the sampled ratio to pixels (multiples of 32).
            tensor[0] = int(32 * size)
            tensor[1] = 32 * int(size * size_factor)
        if self.is_distributed:
            dist.barrier()
            dist.broadcast(tensor, 0)
        runner.data_loader.dataset.update_dynamic_scale(
            (tensor[0].item(), tensor[1].item()))
|
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
    """YOLOX learning rate scheme.

    There are two main differences between YOLOXLrUpdaterHook
    and CosineAnnealingLrUpdaterHook.

    1. When the current running epoch is greater than
       `max_epoch-last_epoch`, a fixed learning rate will be used
    2. The exp warmup scheme is different with LrUpdaterHook in MMCV

    Args:
        num_last_epochs (int): The number of epochs with a fixed learning
            rate before the end of the training.
    """

    def __init__(self, num_last_epochs, **kwargs):
        self.num_last_epochs = num_last_epochs
        super(YOLOXLrUpdaterHook, self).__init__(**kwargs)

    def get_warmup_lr(self, cur_iters):

        def _warmup(base_lrs):
            # Quadratic warmup: scale grows as (iter / warmup_iters) ** 2.
            scale = self.warmup_ratio * pow(
                (cur_iters + 1) / float(self.warmup_iters), 2)
            return [lr * scale for lr in base_lrs]

        if isinstance(self.base_lr, dict):
            return {key: _warmup(lrs) for key, lrs in self.base_lr.items()}
        return _warmup(self.base_lr)

    def get_lr(self, runner, base_lr):
        # Progress units (epochs or iters) reserved for the fixed-lr tail.
        fixed_tail = len(runner.data_loader) * self.num_last_epochs
        if self.by_epoch:
            progress = runner.epoch
            max_progress = runner.max_epochs
        else:
            progress = runner.iter
            max_progress = runner.max_iters
        progress += 1
        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr
        if progress >= max_progress - fixed_tail:
            # Keep a constant lr for the last ``num_last_epochs`` epochs.
            return target_lr
        return annealing_cos(
            base_lr, target_lr,
            (progress - self.warmup_iters) /
            (max_progress - self.warmup_iters - fixed_tail))
|
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
    """Switch the mode of YOLOX during training.

    This hook turns off the mosaic and mixup data augmentation and switches
    to use L1 loss in bbox_head.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to close the data augmentation and switch to L1 loss.
            Default: 15.
        skip_type_keys (list[str], optional): Sequence of type string to be
            skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp')
    """

    def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
        self.num_last_epochs = num_last_epochs
        self.skip_type_keys = skip_type_keys
        self._restart_dataloader = False

    def before_train_epoch(self, runner):
        """Close mosaic and mixup augmentation and switches to use L1 loss."""
        train_loader = runner.data_loader
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        switch_epoch = runner.max_epochs - self.num_last_epochs
        if (runner.epoch + 1) != switch_epoch:
            # After a forced restart, mark the loader initialized again so
            # later epochs behave normally.
            if self._restart_dataloader:
                train_loader._DataLoader__initialized = True
            return
        runner.logger.info('No mosaic and mixup aug now!')
        train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
        if hasattr(train_loader, 'persistent_workers') and train_loader.persistent_workers is True:
            # Force persistent workers to restart so the pipeline change is
            # picked up by the worker processes (name-mangled attribute of
            # torch DataLoader).
            train_loader._DataLoader__initialized = False
            train_loader._iterator = None
            self._restart_dataloader = True
        runner.logger.info('Add additional L1 loss now!')
        model.bbox_head.use_l1 = True
|
def mask_matrix_nms(masks,
                    labels,
                    scores,
                    filter_thr=-1,
                    nms_pre=-1,
                    max_num=-1,
                    kernel='gaussian',
                    sigma=2.0,
                    mask_area=None):
    """Matrix NMS for multi-class masks.

    Args:
        masks (Tensor): Has shape (num_instances, h, w)
        labels (Tensor): Labels of corresponding masks,
            has shape (num_instances,).
        scores (Tensor): Mask scores of corresponding masks,
            has shape (num_instances).
        filter_thr (float): Score threshold to filter the masks
            after matrix nms. Default: -1, which means do not
            use filter_thr.
        nms_pre (int): The max number of instances to do the matrix nms.
            Default: -1, which means do not use nms_pre.
        max_num (int, optional): If there are more than max_num masks after
            matrix, only top max_num will be kept. Default: -1, which means
            do not use max_num.
        kernel (str): 'linear' or 'gaussian'.
        sigma (float): std in gaussian method.
        mask_area (Tensor): The sum of seg_masks.

    Returns:
        tuple(Tensor): Processed mask results.

            - scores (Tensor): Updated scores, has shape (n,).
            - labels (Tensor): Remained labels, has shape (n,).
            - masks (Tensor): Remained masks, has shape (n, w, h).
            - keep_inds (Tensor): The indices number of
                the remaining mask in the input mask, has shape (n,).
    """
    assert len(labels) == len(masks) == len(scores)
    if len(labels) == 0:
        return (scores.new_zeros(0), labels.new_zeros(0),
                masks.new_zeros(0, *masks.shape[-2:]), labels.new_zeros(0))
    if mask_area is None:
        mask_area = masks.sum((1, 2)).float()
    else:
        assert len(masks) == len(mask_area)

    # Sort by score (descending) and optionally keep only the top nms_pre.
    scores, order = torch.sort(scores, descending=True)
    keep_inds = order
    if nms_pre > 0 and len(order) > nms_pre:
        order = order[:nms_pre]
        keep_inds = keep_inds[:nms_pre]
        scores = scores[:nms_pre]
    masks = masks[order]
    mask_area = mask_area[order]
    labels = labels[order]

    num = len(labels)
    flat = masks.reshape(num, -1).float()
    # Pairwise intersection / IoU; triu keeps each pair once (i < j).
    inter = flat @ flat.T
    areas = mask_area.expand(num, num)
    iou = (inter / (areas + areas.T - inter)).triu(diagonal=1)
    # Only pairs of the same class suppress each other.
    expanded_labels = labels.expand(num, num)
    same_cls = (expanded_labels == expanded_labels.T).triu(diagonal=1)
    same_cls_iou = iou * same_cls
    # Compensation term: the strongest overlap each mask itself suffered.
    compensate_iou, _ = same_cls_iou.max(0)
    compensate_iou = compensate_iou.expand(num, num).T
    if kernel == 'gaussian':
        decay_matrix = torch.exp(-1 * sigma * same_cls_iou ** 2)
        compensate_matrix = torch.exp(-1 * sigma * compensate_iou ** 2)
        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
    elif kernel == 'linear':
        decay_coefficient, _ = (
            (1 - same_cls_iou) / (1 - compensate_iou)).min(0)
    else:
        raise NotImplementedError(
            f'{kernel} kernel is not supported in matrix nms!')
    scores = scores * decay_coefficient

    if filter_thr > 0:
        keep = scores >= filter_thr
        keep_inds = keep_inds[keep]
        if not keep.any():
            return (scores.new_zeros(0), labels.new_zeros(0),
                    masks.new_zeros(0, *masks.shape[-2:]),
                    labels.new_zeros(0))
        masks = masks[keep]
        scores = scores[keep]
        labels = labels[keep]

    # Re-sort by decayed scores and truncate to max_num.
    scores, order = torch.sort(scores, descending=True)
    keep_inds = keep_inds[order]
    if max_num > 0 and len(order) > max_num:
        order = order[:max_num]
        keep_inds = keep_inds[:max_num]
        scores = scores[:max_num]
    masks = masks[order]
    labels = labels[order]
    return scores, labels, masks, keep_inds
|
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce ``tensors`` in flattened buckets and copy results back.

    Tensors are grouped into fixed-size buckets when ``bucket_size_mb`` > 0,
    otherwise bucketed by tensor type.  Each bucket is flattened into one
    contiguous tensor, all-reduced, averaged over ``world_size`` and then
    scattered back into the original tensors in place.
    """
    if bucket_size_mb > 0:
        bucket_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_bytes)
    else:
        # Group by tensor type; OrderedDict keeps the iteration order
        # deterministic across processes.
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()
    for bucket in buckets:
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        for original, reduced in zip(bucket,
                                     _unflatten_dense_tensors(flat, bucket)):
            original.copy_(reduced)
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.Parameters]): List of parameters of a model.
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Only parameters that actually received a gradient take part.
    grads = [
        p.grad.data for p in params
        if p.requires_grad and p.grad is not None
    ]
    world_size = dist.get_world_size()
    if not coalesce:
        # Reduce every gradient tensor individually.
        for grad in grads:
            dist.all_reduce(grad.div_(world_size))
        return
    _allreduce_coalesced(grads, world_size, bucket_size_mb)
|
class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training."""

    def __init__(self, *args, **kwargs):
        # Kept only for backward compatibility with old configs; it adds
        # nothing over the plain OptimizerHook.
        warnings.warn('"DistOptimizerHook" is deprecated, please switch to"mmcv.runner.OptimizerHook".')
        super().__init__(*args, **kwargs)
|
def reduce_mean(tensor):
    """Return the mean of ``tensor`` over all processes.

    When distributed training is unavailable or not initialized, the input
    tensor is returned unchanged.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    # Clone so the caller's tensor is never mutated by div_/all_reduce.
    averaged = tensor.clone()
    dist.all_reduce(averaged.div_(dist.get_world_size()),
                    op=dist.ReduceOp.SUM)
    return averaged
|
def obj2tensor(pyobj, device='cuda'):
    """Serialize a picklable python object into a byte tensor."""
    payload = pickle.dumps(pyobj)
    storage = torch.ByteStorage.from_buffer(payload)
    return torch.ByteTensor(storage).to(device=device)
|
def tensor2obj(tensor):
    """Deserialize a byte tensor back into the original python object."""
    raw = tensor.cpu().numpy().tobytes()
    return pickle.loads(raw)
|
@functools.lru_cache()
def _get_global_gloo_group():
    """Return a process group over all ranks that uses the gloo backend.

    The result is cached, so the group is created at most once.
    """
    # NCCL groups cannot run CPU-side collectives, so build a dedicated
    # gloo group in that case; otherwise the default group already works.
    if dist.get_backend() == 'nccl':
        return dist.new_group(backend='gloo')
    return dist.group.WORLD
|
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
    """Apply an all-reduce over the values of a python dict.

    The code is modified from https://github.com/Megvii-
    BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.

    NOTE: ``py_dict`` must have the same keys and value shapes on every
    rank.  Currently only the NCCL backend is supported.

    Args:
        py_dict (dict): Dict to be applied all reduce op.
        op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
        group (:obj:`torch.distributed.group`, optional): Deprecated;
            ignored. Default: None.
        to_float (bool): Whether to convert all values of dict to float.
            Default: True.

    Returns:
        OrderedDict: reduced python dict object.
    """
    warnings.warn('group` is deprecated. Currently only supports NCCL backend.')
    _, world_size = get_dist_info()
    if world_size == 1:
        return py_dict

    py_key = list(py_dict.keys())
    if not isinstance(py_dict, OrderedDict):
        # A plain dict's iteration order may differ across ranks, so sync
        # the key order from rank 0 before concatenating values.
        key_tensor = obj2tensor(py_key)
        dist.broadcast(key_tensor, src=0)
        py_key = tensor2obj(key_tensor)

    shapes = [py_dict[k].shape for k in py_key]
    numels = [py_dict[k].numel() for k in py_key]
    if to_float:
        warnings.warn('Note: the "to_float" is True, you need to ensure that the behavior is reasonable.')
        flat = torch.cat([py_dict[k].flatten().float() for k in py_key])
    else:
        flat = torch.cat([py_dict[k].flatten() for k in py_key])

    dist.all_reduce(flat, op=dist.ReduceOp.SUM)
    if op == 'mean':
        flat /= world_size

    chunks = torch.split(flat, numels)
    out_dict = {
        key: chunk.reshape(shape)
        for key, chunk, shape in zip(py_key, chunks, shapes)
    }
    if isinstance(py_dict, OrderedDict):
        out_dict = OrderedDict(out_dict)
    return out_dict
|
def palette_val(palette):
    """Convert a 0-255 integer palette to matplotlib float colors.

    Args:
        palette List[tuple]: A list of color tuples.

    Returns:
        List[tuple[float]]: A list of RGB matplotlib color tuples in [0, 1].
    """
    return [tuple(channel / 255 for channel in color) for color in palette]
|
def get_palette(palette, num_classes):
    """Get palette from various inputs.

    Args:
        palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
        num_classes (int): the number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)
    if isinstance(palette, list):
        result = palette
    elif isinstance(palette, tuple):
        # A single color replicated for every class.
        result = [palette] * num_classes
    elif palette == 'random' or palette is None:
        # Deterministic "random" colors: seed, sample, then restore the
        # global RNG state so callers are unaffected.
        state = np.random.get_state()
        np.random.seed(42)
        colors = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(state)
        result = [tuple(c) for c in colors]
    elif palette == 'coco':
        from mmdet.datasets import CocoDataset, CocoPanopticDataset
        result = CocoDataset.PALETTE
        if len(result) < num_classes:
            result = CocoPanopticDataset.PALETTE
    elif palette == 'citys':
        from mmdet.datasets import CityscapesDataset
        result = CityscapesDataset.PALETTE
    elif palette == 'voc':
        from mmdet.datasets import VOCDataset
        result = VOCDataset.PALETTE
    elif mmcv.is_str(palette):
        # A single named color applied to every class (BGR -> RGB).
        result = [mmcv.color_val(palette)[::-1]] * num_classes
    else:
        raise TypeError(f'Invalid type for palette: {type(palette)}')
    assert len(result) >= num_classes, 'The length of palette should not be less than `num_classes`.'
    return result
|
class COCO(_COCO):
    """This class is almost the same as official pycocotools package.

    It implements some snake case function aliases. So that the COCO class
    has the same interface as LVIS class.
    """

    def __init__(self, annotation_file=None):
        # ``mmpycocotools`` reports versions like '12.0.3' while the
        # official ``pycocotools`` uses '2.0.x'.  Compare numerically: the
        # previous lexicographic comparison ('2.0.7' >= '12.0.2' is True)
        # incorrectly emitted this warning for the official package.
        version = getattr(pycocotools, '__version__', '0')
        try:
            version_tuple = tuple(int(part) for part in version.split('.')[:3])
        except ValueError:
            # Non-numeric version component; assume official pycocotools.
            version_tuple = (0, )
        if version_tuple >= (12, 0, 2):
            warnings.warn('mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', UserWarning)
        super().__init__(annotation_file=annotation_file)
        # Snake-case aliases of pycocotools' lookup tables.
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        """Alias of ``getAnnIds`` (the default lists are never mutated)."""
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        """Alias of ``getCatIds``."""
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        """Alias of ``getImgIds``."""
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        """Alias of ``loadAnns``."""
        return self.loadAnns(ids)

    def load_cats(self, ids):
        """Alias of ``loadCats``."""
        return self.loadCats(ids)

    def load_imgs(self, ids):
        """Alias of ``loadImgs``."""
        return self.loadImgs(ids)
|
def pq_compute_single_core(proc_id, annotation_set, gt_folder, pred_folder, categories, file_client=None):
    """The single core function to evaluate the metric of Panoptic
    Segmentation.

    Same as the function with the same name in `panopticapi`. Only the
    function to load the images is changed to use the file client.

    Args:
        proc_id (int): The id of the mini process.
        annotation_set (list): Matched (gt_ann, pred_ann) dict pairs for the
            images handled by this process.
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.

    Returns:
        PQStat: Panoptic-quality statistics accumulated over
            ``annotation_set``.
    """
    if (PQStat is None):
        raise RuntimeError('panopticapi is not installed, please install it by: pip install git+https://github.com/cocodataset/panopticapi.git.')
    if (file_client is None):
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    pq_stat = PQStat()
    idx = 0
    for (gt_ann, pred_ann) in annotation_set:
        # Progress logging every 100 images.
        if ((idx % 100) == 0):
            print('Core: {}, {} from {} images processed'.format(proc_id, idx, len(annotation_set)))
        idx += 1
        # Load the ground-truth panoptic PNG via the file client and convert
        # the RGB encoding into integer segment ids.
        img_bytes = file_client.get(os.path.join(gt_folder, gt_ann['file_name']))
        pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb')
        pan_gt = rgb2id(pan_gt)
        # Predictions are read directly from the local prediction folder.
        pan_pred = mmcv.imread(os.path.join(pred_folder, pred_ann['file_name']), flag='color', channel_order='rgb')
        pan_pred = rgb2id(pan_pred)
        gt_segms = {el['id']: el for el in gt_ann['segments_info']}
        pred_segms = {el['id']: el for el in pred_ann['segments_info']}
        # Consistency checks on predictions: every id in the PNG must appear
        # in the JSON (and vice versa) and every category must be known.
        pred_labels_set = set((el['id'] for el in pred_ann['segments_info']))
        (labels, labels_cnt) = np.unique(pan_pred, return_counts=True)
        for (label, label_cnt) in zip(labels, labels_cnt):
            if (label not in pred_segms):
                if (label == VOID):
                    continue
                raise KeyError('In the image with ID {} segment with ID {} is presented in PNG and not presented in JSON.'.format(gt_ann['image_id'], label))
            # Record the predicted segment's pixel area from the PNG.
            pred_segms[label]['area'] = label_cnt
            pred_labels_set.remove(label)
            if (pred_segms[label]['category_id'] not in categories):
                raise KeyError('In the image with ID {} segment with ID {} has unknown category_id {}.'.format(gt_ann['image_id'], label, pred_segms[label]['category_id']))
        if (len(pred_labels_set) != 0):
            raise KeyError('In the image with ID {} the following segment IDs {} are presented in JSON and not presented in PNG.'.format(gt_ann['image_id'], list(pred_labels_set)))
        # Encode each (gt, pred) id pair into one integer per pixel
        # (gt_id * OFFSET + pred_id) and count pixels per pair.
        pan_gt_pred = ((pan_gt.astype(np.uint64) * OFFSET) + pan_pred.astype(np.uint64))
        gt_pred_map = {}
        (labels, labels_cnt) = np.unique(pan_gt_pred, return_counts=True)
        for (label, intersection) in zip(labels, labels_cnt):
            gt_id = (label // OFFSET)
            pred_id = (label % OFFSET)
            gt_pred_map[(gt_id, pred_id)] = intersection
        # True positives: same-category, non-crowd pairs with IoU > 0.5.
        gt_matched = set()
        pred_matched = set()
        for (label_tuple, intersection) in gt_pred_map.items():
            (gt_label, pred_label) = label_tuple
            if (gt_label not in gt_segms):
                continue
            if (pred_label not in pred_segms):
                continue
            if (gt_segms[gt_label]['iscrowd'] == 1):
                continue
            if (gt_segms[gt_label]['category_id'] != pred_segms[pred_label]['category_id']):
                continue
            # Pixels the prediction shares with VOID are excluded from the
            # union, as in the official panopticapi.
            union = (((pred_segms[pred_label]['area'] + gt_segms[gt_label]['area']) - intersection) - gt_pred_map.get((VOID, pred_label), 0))
            iou = (intersection / union)
            if (iou > 0.5):
                pq_stat[gt_segms[gt_label]['category_id']].tp += 1
                pq_stat[gt_segms[gt_label]['category_id']].iou += iou
                gt_matched.add(gt_label)
                pred_matched.add(pred_label)
        # Unmatched, non-crowd GT segments are false negatives; crowd
        # regions are remembered per category for the FP check below.
        crowd_labels_dict = {}
        for (gt_label, gt_info) in gt_segms.items():
            if (gt_label in gt_matched):
                continue
            if (gt_info['iscrowd'] == 1):
                crowd_labels_dict[gt_info['category_id']] = gt_label
                continue
            pq_stat[gt_info['category_id']].fn += 1
        # Unmatched predictions are false positives unless more than half of
        # their area lies on VOID or same-category crowd pixels.
        for (pred_label, pred_info) in pred_segms.items():
            if (pred_label in pred_matched):
                continue
            intersection = gt_pred_map.get((VOID, pred_label), 0)
            if (pred_info['category_id'] in crowd_labels_dict):
                intersection += gt_pred_map.get((crowd_labels_dict[pred_info['category_id']], pred_label), 0)
            if ((intersection / pred_info['area']) > 0.5):
                continue
            pq_stat[pred_info['category_id']].fp += 1
    print('Core: {}, all {} images processed'.format(proc_id, len(annotation_set)))
    return pq_stat
|
def pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, categories, file_client=None):
    """Evaluate the metrics of Panoptic Segmentation with multithreading.

    Same as the function with the same name in `panopticapi`.

    Args:
        matched_annotations_list (list): The matched annotation list. Each
            element is a tuple of annotations of the same image with the
            format (gt_anns, pred_anns).
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.

    Returns:
        PQStat: Panoptic-quality statistics aggregated over all workers.
    """
    if PQStat is None:
        raise RuntimeError('panopticapi is not installed, please install it by: pip install git+https://github.com/cocodataset/panopticapi.git.')
    if file_client is None:
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(matched_annotations_list, cpu_num)
    print('Number of cores: {}, images per core: {}'.format(cpu_num, len(annotations_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, annotation_set in enumerate(annotations_split):
        p = workers.apply_async(pq_compute_single_core, (proc_id, annotation_set, gt_folder, pred_folder, categories, file_client))
        processes.append(p)
    # Fix: close the pool so the worker processes exit once their tasks
    # finish; the original never closed it, leaking processes on repeated
    # evaluation calls.
    workers.close()
    pq_stat = PQStat()
    for p in processes:
        pq_stat += p.get()
    # Wait for all workers to terminate before returning.
    workers.join()
    return pq_stat
|
def _concat_dataset(cfg, default_args=None):
    """Build a :obj:`ConcatDataset` from a config whose ``ann_file`` is a
    list.

    Each annotation file (together with the matching prefix entries when
    they are given as lists) produces one sub-dataset.
    """
    from .dataset_wrappers import ConcatDataset
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    separate_eval = cfg.get('separate_eval', True)

    datasets = []
    for idx in range(len(ann_files)):
        sub_cfg = copy.deepcopy(cfg)
        # `separate_eval` belongs to the wrapper, not to the sub-datasets.
        sub_cfg.pop('separate_eval', None)
        sub_cfg['ann_file'] = ann_files[idx]
        if isinstance(img_prefixes, (list, tuple)):
            sub_cfg['img_prefix'] = img_prefixes[idx]
        if isinstance(seg_prefixes, (list, tuple)):
            sub_cfg['seg_prefix'] = seg_prefixes[idx]
        if isinstance(proposal_files, (list, tuple)):
            sub_cfg['proposal_file'] = proposal_files[idx]
        datasets.append(build_dataset(sub_cfg, default_args))
    return ConcatDataset(datasets, separate_eval)
|
def build_dataset(cfg, default_args=None):
    """Build a dataset from config, recursively handling wrapper types."""
    from .dataset_wrappers import ClassBalancedDataset, ConcatDataset, MultiImageMixDataset, RepeatDataset
    # A list/tuple of configs is an implicit ConcatDataset.
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])

    dataset_type = cfg['type']
    if dataset_type == 'ConcatDataset':
        return ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            cfg.get('separate_eval', True))
    if dataset_type == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if dataset_type == 'ClassBalancedDataset':
        return ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args),
            cfg['oversample_thr'])
    if dataset_type == 'MultiImageMixDataset':
        # Build the wrapped dataset first, then pass the remaining keys on.
        cp_cfg = copy.deepcopy(cfg)
        cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
        cp_cfg.pop('type')
        return MultiImageMixDataset(**cp_cfg)
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        # Multiple annotation files -> implicit concatenation.
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
|
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     runner_type='EpochBasedRunner',
                     persistent_workers=False,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int, Optional): Seed to be used. Default: None.
        runner_type (str): Type of runner. Default: `EpochBasedRunner`
        persistent_workers (bool): If True, the data loader will not
            shutdown the worker processes after a dataset has been consumed
            once. This allows to maintain the workers `Dataset` instances
            alive. This argument is only valid when PyTorch>=1.7.0.
            Default: False.
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # Each process owns its own loader: batch size is per GPU.
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if runner_type == 'IterBasedRunner':
        # Infinite batch samplers yield whole batches themselves, so the
        # DataLoader's own batching is disabled (batch_size=1, sampler=None).
        if shuffle:
            batch_sampler = InfiniteGroupBatchSampler(
                dataset, batch_size, world_size, rank, seed=seed)
        else:
            batch_sampler = InfiniteBatchSampler(
                dataset, batch_size, world_size, rank, seed=seed,
                shuffle=False)
        batch_size = 1
        sampler = None
    else:
        batch_sampler = None
        if not dist:
            sampler = GroupSampler(dataset,
                                   samples_per_gpu) if shuffle else None
        elif shuffle:
            sampler = DistributedGroupSampler(
                dataset, samples_per_gpu, world_size, rank, seed=seed)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False, seed=seed)

    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    if (TORCH_VERSION != 'parrots'
            and digit_version(TORCH_VERSION) >= digit_version('1.7.0')):
        kwargs['persistent_workers'] = persistent_workers
    elif persistent_workers is True:
        warnings.warn('persistent_workers is invalid because your pytorch version is lower than 1.7.0')

    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        batch_sampler=batch_sampler,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=init_fn,
        **kwargs)
|
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed a dataloader worker deterministically.

    Combines the global seed, the process rank and the worker id so every
    worker draws a distinct, reproducible random stream.
    """
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
|
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
    """Cityscapes dataset for instance segmentation (COCO-style API)."""

    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train',
               'motorcycle', 'bicycle')

    PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
               (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # Image ids that carry at least one annotation.
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # Image ids containing at least one annotation of a wanted category.
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
                                         or all_iscrowd):
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes,
                bboxes_ignore, labels, masks, seg_map.
                "masks" are already decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Skip degenerate boxes and unknown categories.
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                # Crowd regions are ignored rather than trained on.
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=img_info['segm_file'])
        return ann

    def results2txt(self, results, outfile_prefix):
        """Dump the detection results to a txt file.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            outfile_prefix (str): The filename prefix of the json files.
                If the prefix is "somepath/xxx", the txt files will be
                named "somepath/xxx.txt".

        Returns:
            list[str]: Result txt files which contains corresponding
                instance segmentation images.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            # Fix: the install hint previously said "citscapesscripts",
            # which is not a real package name.
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')

            bbox_result, segm_result = result
            bboxes = np.vstack(bbox_result)
            # segm_result may carry per-mask scores as a second element.
            if isinstance(segm_result, tuple):
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                segms = mmcv.concat_list(segm_result)
                mask_score = [bbox[-1] for bbox in bboxes]
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)

            assert len(bboxes) == len(segms) == len(labels)
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(
                        outfile_prefix, basename + f'_{i}_{classes}.png')
                    mmcv.imwrite(mask, png_filename)
                    fout.write(
                        f'{osp.basename(png_filename)} {class_id} {score}\n')
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format the results to txt (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of txt files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be
                created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict
                containing the json filepaths, tmp_dir is the temporal
                directory created for saving txt/png files when
                txtfile_prefix is not specified.
        """
        # Fix: the original repeated these two assertions twice.
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if txtfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 outfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in Cityscapes/COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast' and
                'cityscapes'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            outfile_prefix (str | None): The prefix of output file, e.g.,
                "a/b/prefix". For COCO metrics it prefixes the json files;
                for the cityscapes protocol it prefixes the txt/png result
                files. If not specified, a temp file will be created.
                Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for
                evaluating recalls. Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str, float]: COCO style evaluation metric or cityscapes
                mAP and AP@50.
        """
        eval_results = dict()
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        # The leftover metrics are evaluated with the COCO protocol on a
        # freshly built CocoDataset sharing this dataset's configuration.
        if len(metrics) > 0:
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
                                    None, self.data_root, self.img_prefix,
                                    self.seg_prefix, self.proposal_file,
                                    self.test_mode, self.filter_empty_gt)
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(
                self_coco.evaluate(results, metrics, logger, outfile_prefix,
                                   classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of output txt file
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: Cityscapes evaluation results, contains
                'mAP' and 'AP@50'.
        """
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            # Fix: the install hint previously said "citscapesscripts",
            # which is not a real package name.
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, txtfile_prefix)
        if tmp_dir is None:
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')

        eval_results = OrderedDict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        # Configure the global args object of cityscapesscripts.
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir,
                                                   'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(
            self.img_prefix.replace('leftImg8bit', 'gtFine'),
            '*/*_gtFine_instanceIds.png')

        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), (
            f'Cannot find ground truth images in '
            f'{CSEval.args.groundTruthSearch}.')
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList,
                                                 groundTruthImgList,
                                                 CSEval.args)['averages']

        eval_results['mAP'] = CSEval_results['allAp']
        eval_results['AP@50'] = CSEval_results['allAp50%']
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
|
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for detection.

    The annotation format is shown as follows. The ``ann`` field is
    optional for testing.

    .. code-block:: none

        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
                }
            },
            ...
        ]

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        data_root (str, optional): Data root for ``ann_file``,
            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
        test_mode (bool, optional): If set True, annotation will not be
            loaded.
        filter_empty_gt (bool, optional): If set true, images without
            bounding boxes of the dataset's classes will be filtered out.
            This option only works when ``test_mode=False``, i.e., we never
            filter images during tests.
    """

    CLASSES = None
    PALETTE = None

    def __init__(self,
                 ann_file,
                 pipeline,
                 classes=None,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 filter_empty_gt=True,
                 file_client_args=dict(backend='disk')):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)
        self.file_client = mmcv.FileClient(**file_client_args)
        # Join relative paths onto ``data_root`` so that configs may use
        # paths relative to a single root directory.
        if (self.data_root is not None):
            if (not osp.isabs(self.ann_file)):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if (not ((self.img_prefix is None) or osp.isabs(self.img_prefix))):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if (not ((self.seg_prefix is None) or osp.isabs(self.seg_prefix))):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if (not ((self.proposal_file is None) or osp.isabs(self.proposal_file))):
                self.proposal_file = osp.join(self.data_root, self.proposal_file)
        # ``get_local_path`` (MMCV >= 1.3.16) downloads remote annotation
        # files to a local temp path; fall back to treating the path as
        # local on older MMCV versions.
        if hasattr(self.file_client, 'get_local_path'):
            with self.file_client.get_local_path(self.ann_file) as local_path:
                self.data_infos = self.load_annotations(local_path)
        else:
            warnings.warn(f'The used MMCV version does not have get_local_path. We treat the {self.ann_file} as local paths and it might cause errors if the path is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
            self.data_infos = self.load_annotations(self.ann_file)
        if (self.proposal_file is not None):
            if hasattr(self.file_client, 'get_local_path'):
                with self.file_client.get_local_path(self.proposal_file) as local_path:
                    self.proposals = self.load_proposals(local_path)
            else:
                # BUG FIX: this warning previously interpolated
                # ``self.ann_file`` although it concerns the proposal file.
                warnings.warn(f'The used MMCV version does not have get_local_path. We treat the {self.proposal_file} as local paths and it might cause errors if the path is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
                self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        if (not test_mode):
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if (self.proposals is not None):
                self.proposals = [self.proposals[i] for i in valid_inds]
            # Aspect-ratio grouping is only needed for training samplers.
            self._set_group_flag()
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Get category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; it was merely an alias of the builtin ``int``.
        # ``np.int64`` keeps the cast explicit and ``tolist`` still yields
        # plain Python ints.
        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Filter images too small."""
        if self.filter_empty_gt:
            # The base class only filters by image size, not by empty GT.
            warnings.warn('CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for (i, img_info) in enumerate(self.data_infos):
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if ((img_info['width'] / img_info['height']) > 1):
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Get another random index from the same group as the given index."""
        pool = np.where((self.flag == self.flag[idx]))[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                True).
        """
        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if (data is None):
                # The pipeline rejected this sample (e.g. no valid GT
                # after augmentation); retry with another image from the
                # same aspect-ratio group.
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if (self.proposals is not None):
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if (self.proposals is not None):
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if (classes is None):
            return cls.CLASSES
        if isinstance(classes, str):
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')
        return class_names

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating
                mAP. Default: None.
        """
        if (not isinstance(metric, str)):
            assert (len(metric) == 1)
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if (metric not in allowed_metrics):
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = ([iou_thr] if isinstance(iou_thr, float) else iou_thr)
        if (metric == 'mAP'):
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int((iou_thr * 100)):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = (sum(mean_aps) / len(mean_aps))
        elif (metric == 'recall'):
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou) in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou}'] = recalls[(i, j)]
            if (recalls.shape[1] > 1):
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results

    def __repr__(self):
        """Print the number of instance number."""
        dataset_type = ('Test' if self.test_mode else 'Train')
        result = f'\n{self.__class__.__name__} {dataset_type} dataset with number of images {len(self)}, and instance counts:\n'
        if (self.CLASSES is None):
            result += 'Category names are not provided. \n'
            return result
        # One extra slot counts images that contain no labeled instance
        # ("background" row in the table below).
        instance_count = np.zeros((len(self.CLASSES) + 1)).astype(int)
        for idx in range(len(self)):
            label = self.get_ann_info(idx)['labels']
            (unique, counts) = np.unique(label, return_counts=True)
            if (len(unique) > 0):
                instance_count[unique] += counts
            else:
                instance_count[(- 1)] += 1
        # Lay the counts out 5 (category, count) pairs per table row.
        table_data = [(['category', 'count'] * 5)]
        row_data = []
        for (cls, count) in enumerate(instance_count):
            if (cls < len(self.CLASSES)):
                row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
            else:
                row_data += ['-1 background', f'{count}']
            if (len(row_data) == 10):
                table_data.append(row_data)
                row_data = []
        if (len(row_data) >= 2):
            # Drop a trailing zero-count background entry, then flush the
            # remaining partial row.
            if (row_data[(- 1)] == '0'):
                row_data = row_data[:(- 2)]
            if (len(row_data) >= 2):
                table_data.append([])
                table_data.append(row_data)
        table = AsciiTable(table_data)
        result += table.table
        return result
|
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the results
            separately if it is used as validation dataset.
            Defaults to True.
    """

    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        # All concatenated datasets are expected to share the metadata of
        # the first one.
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = getattr(datasets[0], 'PALETTE', None)
        self.separate_eval = separate_eval
        if (not separate_eval):
            if any([isinstance(ds, CocoDataset) for ds in datasets]):
                raise NotImplementedError('Evaluating concatenated CocoDataset as a whole is not supported! Please set "separate_eval=True"')
            elif (len(set([type(ds) for ds in datasets])) != 1):
                raise NotImplementedError('All the datasets should have same types')
        if hasattr(datasets[0], 'flag'):
            # Concatenate the per-dataset aspect-ratio group flags.
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

    def _locate(self, idx):
        """Translate a global index into (dataset index, sample index).

        Args:
            idx (int): Global index into the concatenated dataset. May be
                negative, in which case it indexes from the end.

        Returns:
            tuple[int, int]: Index of the sub-dataset and the index of the
                sample inside that sub-dataset.
        """
        if (idx < 0):
            if ((- idx) > len(self)):
                raise ValueError('absolute value of index should not exceed dataset length')
            idx = (len(self) + idx)
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if (dataset_idx == 0):
            sample_idx = idx
        else:
            sample_idx = (idx - self.cumulative_sizes[(dataset_idx - 1)])
        return (dataset_idx, sample_idx)

    def get_cat_ids(self, idx):
        """Get category ids of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        (dataset_idx, sample_idx) = self._locate(idx)
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)

    def get_ann_info(self, idx):
        """Get annotation of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        (dataset_idx, sample_idx) = self._locate(idx)
        return self.datasets[dataset_idx].get_ann_info(sample_idx)

    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: AP results of the total dataset or each
                separate dataset if `self.separate_eval=True`.
        """
        assert (len(results) == self.cumulative_sizes[(- 1)]), f'Dataset and results have different sizes: {self.cumulative_sizes[(- 1)]} v.s. {len(results)}'
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), f'{type(dataset)} does not implement evaluate function'
        if self.separate_eval:
            dataset_idx = (- 1)
            total_eval_results = dict()
            for (size, dataset) in zip(self.cumulative_sizes, self.datasets):
                # Slice out the result chunk that belongs to this dataset.
                start_idx = (0 if (dataset_idx == (- 1)) else self.cumulative_sizes[dataset_idx])
                end_idx = self.cumulative_sizes[(dataset_idx + 1)]
                results_per_dataset = results[start_idx:end_idx]
                # BUG FIX: the progress message misspelled "Evaluating".
                print_log(f'\nEvaluating {dataset.ann_file} with {len(results_per_dataset)} images now', logger=logger)
                eval_results_per_dataset = dataset.evaluate(results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                # Prefix each metric with the dataset index so metrics from
                # different datasets do not collide.
                for (k, v) in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})
            return total_eval_results
        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
            raise NotImplementedError('Evaluating concatenated CocoDataset as a whole is not supported! Please set "separate_eval=True"')
        elif (len(set([type(ds) for ds in self.datasets])) != 1):
            raise NotImplementedError('All the datasets should have same types')
        else:
            # Temporarily pretend dataset[0] owns all data_infos so its own
            # ``evaluate`` can score the concatenated results in one pass.
            original_data_infos = self.datasets[0].data_infos
            self.datasets[0].data_infos = sum([dataset.data_infos for dataset in self.datasets], [])
            eval_results = self.datasets[0].evaluate(results, logger=logger, **kwargs)
            self.datasets[0].data_infos = original_data_infos
            return eval_results
|
@DATASETS.register_module()
class RepeatDataset:
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the
    dataset is small. Using RepeatDataset can reduce the data loading time
    between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        # Mirror the wrapped dataset's metadata.
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)
        if hasattr(self.dataset, 'flag'):
            # Repeat the aspect-ratio group flags alongside the samples.
            self.flag = np.tile(self.dataset.flag, times)
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        # Wrap the index back into the original dataset's range.
        return self.dataset[idx % self._ori_len]

    def get_cat_ids(self, idx):
        """Get category ids of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        return self.dataset.get_cat_ids(idx % self._ori_len)

    def get_ann_info(self, idx):
        """Get annotation of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.dataset.get_ann_info(idx % self._ori_len)

    def __len__(self):
        """Length after repetition."""
        return self.times * self._ori_len
|
@DATASETS.register_module()
class ClassBalancedDataset:
    """A wrapper of repeated dataset with repeat factor.

    Suitable for training on class imbalanced datasets like LVIS. Following
    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
    in each epoch, an image may appear multiple times based on its
    "repeat factor".
    The repeat factor for an image is a function of the frequency the rarest
    category labeled in that image. The "frequency of category c" in [0, 1]
    is defined by the fraction of images in the training set (without
    repeats) in which category c appears.
    The dataset needs to instantiate :func:`self.get_cat_ids` to support
    ClassBalancedDataset.

    The repeat factor is computed as followed.

    1. For each category c, compute the fraction # of images
       that contain it: :math:`f(c)`
    2. For each category c, compute the category-level repeat factor:
       :math:`r(c) = max(1, sqrt(t/f(c)))`
    3. For each image I, compute the image-level repeat factor:
       :math:`r(I) = max_{c in I} r(c)`

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. For categories with ``f_c >= oversample_thr``, there is
            no oversampling. For categories with ``f_c < oversample_thr``,
            the degree of oversampling following the square-root inverse
            frequency heuristic above.
        filter_empty_gt (bool, optional): If set true, images without
            bounding boxes will not be oversampled. Otherwise, they will be
            categorized as the pure background class and involved into the
            oversampling. Default: True.
    """

    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)
        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        # Expand each image index ceil(r(I)) times.
        repeat_indices = []
        for (dataset_idx, repeat_factor) in enumerate(repeat_factors):
            repeat_indices.extend(([dataset_idx] * math.ceil(repeat_factor)))
        self.repeat_indices = repeat_indices
        flags = []
        if hasattr(self.dataset, 'flag'):
            # Repeat the aspect-ratio group flags consistently with the
            # repeated sample indices.
            for (flag, repeat_factor) in zip(self.dataset.flag, repeat_factors):
                flags.extend(([flag] * int(math.ceil(repeat_factor))))
            assert (len(flags) == len(repeat_indices))
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        """Get repeat factor for each images in the dataset.

        Args:
            dataset (:obj:`CustomDataset`): The dataset
            repeat_thr (float): The threshold of frequency. If an image
                contains the categories whose frequency below the threshold,
                it would be repeated.

        Returns:
            list[float]: The repeat factors for each images in the dataset.
        """
        num_images = len(dataset)
        # PERF: collect every image's category set once up front.
        # The previous implementation called ``get_cat_ids`` twice per
        # image (once for the frequencies, once for the factors), which is
        # wasteful when that lookup is expensive.
        image_cat_ids = []
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if ((len(cat_ids) == 0) and (not self.filter_empty_gt)):
                # Map annotation-free images to a dedicated pseudo
                # "background" category beyond the real class range.
                cat_ids = set([len(self.CLASSES)])
            image_cat_ids.append(cat_ids)
        # 1. f(c): fraction of images containing category c.
        category_freq = defaultdict(int)
        for cat_ids in image_cat_ids:
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        for (k, v) in category_freq.items():
            category_freq[k] = (v / num_images)
        # 2. r(c) = max(1, sqrt(t / f(c)))
        category_repeat = {cat_id: max(1.0, math.sqrt((repeat_thr / cat_freq))) for (cat_id, cat_freq) in category_freq.items()}
        # 3. r(I) = max_{c in I} r(c)
        repeat_factors = []
        for cat_ids in image_cat_ids:
            repeat_factor = 1
            if (len(cat_ids) > 0):
                repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})
            repeat_factors.append(repeat_factor)
        return repeat_factors

    def __getitem__(self, idx):
        ori_index = self.repeat_indices[idx]
        return self.dataset[ori_index]

    def get_ann_info(self, idx):
        """Get annotation of dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        ori_index = self.repeat_indices[idx]
        return self.dataset.get_ann_info(ori_index)

    def __len__(self):
        """Length after repetition."""
        return len(self.repeat_indices)
|
@DATASETS.register_module()
class MultiImageMixDataset:
    """A wrapper of multiple images mixed dataset.

    Suitable for training on multiple images mixed data augmentation like
    mosaic and mixup. For the augmentation pipeline of mixed image data,
    the `get_indexes` method needs to be provided to obtain the image
    indexes, and you can set `skip_flags` to change the pipeline running
    process. At the same time, we provide the `dynamic_scale` parameter
    to dynamically change the output image size.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        dynamic_scale (tuple[int], optional): The image scale can be changed
            dynamically. Default to None. It is deprecated.
        skip_type_keys (list[str], optional): Sequence of type string to
            be skip pipeline. Default to None.
    """

    def __init__(self, dataset, pipeline, dynamic_scale=None, skip_type_keys=None):
        if (dynamic_scale is not None):
            raise RuntimeError('dynamic_scale is deprecated. Please use Resize pipeline to achieve similar functions')
        assert isinstance(pipeline, collections.abc.Sequence)
        if (skip_type_keys is not None):
            assert all([isinstance(skip_type_key, str) for skip_type_key in skip_type_keys])
        self._skip_type_keys = skip_type_keys
        # Build each transform from its config, remembering its type name
        # so transforms can later be skipped by type.
        self.pipeline = []
        self.pipeline_types = []
        for transform in pipeline:
            if (not isinstance(transform, dict)):
                raise TypeError('pipeline must be a dict')
            self.pipeline_types.append(transform['type'])
            self.pipeline.append(build_from_cfg(transform, PIPELINES))
        self.dataset = dataset
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)
        if hasattr(self.dataset, 'flag'):
            self.flag = dataset.flag
        self.num_samples = len(dataset)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        results = copy.deepcopy(self.dataset[idx])
        for (transform, transform_type) in zip(self.pipeline, self.pipeline_types):
            if ((self._skip_type_keys is not None) and (transform_type in self._skip_type_keys)):
                continue
            if hasattr(transform, 'get_indexes'):
                # Mixing transforms (e.g. Mosaic/MixUp) fetch extra images.
                indexes = transform.get_indexes(self.dataset)
                if (not isinstance(indexes, collections.abc.Sequence)):
                    indexes = [indexes]
                results['mix_results'] = [copy.deepcopy(self.dataset[index]) for index in indexes]
            results = transform(results)
            # Drop the auxiliary images once the transform has consumed them.
            if ('mix_results' in results):
                results.pop('mix_results')
        return results

    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys. It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of type
                string to be skip pipeline.
        """
        assert all([isinstance(skip_type_key, str) for skip_type_key in skip_type_keys])
        self._skip_type_keys = skip_type_keys
|
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
    """COCO-style dataset configured with the DeepFashion category set."""

    # Clothing / body-part categories of the DeepFashion annotations.
    CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
               'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
               'skin', 'face')

    # One display color per class, aligned with ``CLASSES``.
    PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
               (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
               (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
               (128, 0, 96), (128, 0, 192), (0, 32, 192)]
|
@PIPELINES.register_module()
class Compose:
    """Compose multiple transforms sequentially.

    Args:
        transforms (Sequence[dict | callable]): Sequence of transform object
            or config dict to be composed.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for transform in transforms:
            if isinstance(transform, dict):
                # Config dicts are instantiated through the registry.
                self.transforms.append(build_from_cfg(transform, PIPELINES))
            elif callable(transform):
                self.transforms.append(transform)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Call function to apply transforms sequentially.

        Args:
            data (dict): A result dict contains the data to transform.

        Returns:
            dict: Transformed data.
        """
        for transform in self.transforms:
            data = transform(data)
            # A transform returning None aborts the whole pipeline
            # (the sample is rejected).
            if (data is None):
                return None
        return data

    def __repr__(self):
        format_string = (self.__class__.__name__ + '(')
        for t in self.transforms:
            str_ = t.__repr__()
            if ('Compose(' in str_):
                str_ = str_.replace('\n', '\n ')
            format_string += '\n'
            format_string += f' {str_}'
        format_string += '\n)'
        return format_string
|
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data
            to be converted.
    """
    # Dispatch on type with guard clauses; order matters (a tensor must be
    # returned untouched, and the string check guards the Sequence branch).
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and (not mmcv.is_str(data)):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
|
@PIPELINES.register_module()
class ToTensor:
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert data in results to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted
                to :obj:`torch.Tensor`.
        """
        results.update({key: to_tensor(results[key]) for key in self.keys})
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
@PIPELINES.register_module()
class ImageToTensor:
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will
    convert it to (C, H, W). If only 2 dimension (H, W) is given, the output
    would be (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert image in results to :obj:`torch.Tensor`
        and transpose the channel order.

        Args:
            results (dict): Result dict contains the image data to convert.

        Returns:
            dict: The result dict contains the image converted
                to :obj:`torch.Tensor` and transposed to (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            # Promote grayscale (H, W) images to (H, W, 1) before transpose.
            if (len(img.shape) < 3):
                img = np.expand_dims(img, (- 1))
            # (H, W, C) -> (C, H, W), contiguous for downstream consumers.
            results[key] = to_tensor(img.transpose(2, 0, 1)).contiguous()
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
@PIPELINES.register_module()
class Transpose:
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Call function to transpose the channel order of data in results.

        Args:
            results (dict): Result dict contains the data to transpose.

        Returns:
            dict: The result dict contains the data transposed to
                ``self.order``.
        """
        results.update({key: results[key].transpose(self.order) for key in self.keys})
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys}, order={self.order})'
|
@PIPELINES.register_module()
class ToDataContainer:
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
            dict(key='gt_labels'))``.
    """

    def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        """Call function to convert data in results to
        :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted to
                :obj:`mmcv.DataContainer`.
        """
        for field in self.fields:
            # Work on a copy so popping 'key' never mutates ``self.fields``.
            cfg = dict(field)
            key = cfg.pop('key')
            results[key] = DC(results[key], **cfg)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(fields={self.fields})'
|
@PIPELINES.register_module()
class DefaultFormatBundle:
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
    These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
      (3)to DataContainer (stack=True)

    Args:
        img_to_float (bool): Whether to force the image to be converted to
            float type. Default: True.
        pad_val (dict): A dict for padding value in batch collating,
            the default value is `dict(img=0, masks=0, seg=255)`.
            Without this argument, the padding value of "gt_semantic_seg"
            will be set to 0 by default, which should be 255.
    """

    def __init__(self, img_to_float=True, pad_val=dict(img=0, masks=0, seg=255)):
        self.img_to_float = img_to_float
        self.pad_val = pad_val

    def __call__(self, results):
        """Call function to transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with
                default bundle.
        """
        if ('img' in results):
            img = results['img']
            if ((self.img_to_float is True) and (img.dtype == np.uint8)):
                # uint8 images are promoted to float32 for training.
                img = img.astype(np.float32)
            results = self._add_default_meta_keys(results)
            if (len(img.shape) < 3):
                # Grayscale (H, W) -> (H, W, 1).
                img = np.expand_dims(img, (- 1))
            # (H, W, C) -> (C, H, W), contiguous for fast tensor creation.
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), padding_value=self.pad_val['img'], stack=True)
        for key in ('proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels'):
            if (key in results):
                results[key] = DC(to_tensor(results[key]))
        if ('gt_masks' in results):
            # Masks stay on CPU; they are not batched as tensors.
            results['gt_masks'] = DC(results['gt_masks'], padding_value=self.pad_val['masks'], cpu_only=True)
        if ('gt_semantic_seg' in results):
            results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)]), padding_value=self.pad_val['seg'], stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict contains the data to convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = (1 if (len(img.shape) < 3) else img.shape[2])
        # Identity normalization as a fallback when Normalize never ran.
        results.setdefault('img_norm_cfg', dict(mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False))
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(img_to_float={self.img_to_float})'
|
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    keys is set to some subset of "img", "proposals", "gt_bboxes",
    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depends on "meta_keys". By default this includes:

    - "img_shape": shape of the image input to the network as a tuple
      (h, w, c). Note that images may be zero padded on the bottom/right
      if the batch tensor is larger than this shape.
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information:

        - mean - per channel mean subtraction
        - std - per channel std divisor
        - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
    """

    def __init__(self, keys, meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Call function to collect keys in results. The keys in
        ``meta_keys`` will be converted to :obj:mmcv.DataContainer.

        Args:
            results (dict): Result dict contains the data to collect.

        Returns:
            dict: The result dict contains the following keys

                - keys in``self.keys``
                - ``img_metas``
        """
        # Metadata travels as a CPU-only DataContainer.
        img_meta = {key: results[key] for key in self.meta_keys}
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys}, meta_keys={self.meta_keys})'
|
@PIPELINES.register_module()
class WrapFieldsToLists:
    """Wrap every value of the result dict into a one-element list.

    Useful as the last step of a test/validation pipeline for single-image
    evaluation or inference, where downstream code expects batched (list)
    inputs.

    Example:
        >>> test_pipeline = [
        >>>    dict(type='LoadImageFromFile'),
        >>>    dict(type='Normalize',
                    mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True),
        >>>    dict(type='Pad', size_divisor=32),
        >>>    dict(type='ImageToTensor', keys=['img']),
        >>>    dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        """Wrap each field of ``results`` in a list, in place.

        Args:
            results (dict): Result dict contains the data to wrap.

        Returns:
            dict: The same dict with every value replaced by ``[value]``.
        """
        for field in list(results.keys()):
            results[field] = [results[field]]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'
|
@PIPELINES.register_module()
class InstaBoost:
    """Data augmentation method in `InstaBoost: Boosting Instance
    Segmentation Via Probability Map Guided Copy-Pasting
    <https://arxiv.org/abs/1908.07801>`_.

    Refer to https://github.com/GothicAi/Instaboost for implementation
    details.

    Args:
        action_candidate (tuple): Action candidates; "normal", "horizontal",
            "vertical" and "skip" are supported.
            Default: ('normal', 'horizontal', 'skip').
        action_prob (tuple): Probabilities matching ``action_candidate``.
            Default: (1, 0, 0).
        scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
        dx (int): Max x-shift is (instance width) / dx. Default: 15.
        dy (int): Max y-shift is (instance height) / dy. Default: 15.
        theta (tuple): (min, max) rotation degree. Default: (-1, 1).
        color_prob (float): Probability of color augmentation. Default: 0.5.
        hflag (bool): Whether to use heatmap guidance. Default: False.
        aug_ratio (float): Probability of applying this transform.
            Default: 0.5.
    """

    def __init__(self, action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, dy=15, theta=(-1, 1), color_prob=0.5, hflag=False, aug_ratio=0.5):
        # instaboostfast is an optional dependency; fail with instructions.
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first for instaboost augmentation.')
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, scale, dx, dy, theta, color_prob, hflag)
        self.aug_ratio = aug_ratio

    def _load_anns(self, results):
        # Convert the ann_info arrays into COCO-style per-instance dicts,
        # translating (x1, y1, x2, y2) boxes into (x, y, w, h).
        ann_info = results['ann_info']
        anns = []
        for label, bbox, mask in zip(ann_info['labels'], ann_info['bboxes'], ann_info['masks']):
            (x1, y1, x2, y2) = bbox
            anns.append({
                'category_id': label,
                'segmentation': mask,
                'bbox': [x1, y1, x2 - x1, y2 - y1],
            })
        return anns

    def _parse_anns(self, results, anns, img):
        # Convert per-instance dicts back into ann_info arrays, dropping
        # degenerate (non-positive width/height) boxes produced by pasting.
        boxes, labels, masks = [], [], []
        for ann in anns:
            (x1, y1, w, h) = ann['bbox']
            if w <= 0 or h <= 0:
                continue
            boxes.append([x1, y1, x1 + w, y1 + h])
            labels.append(ann['category_id'])
            masks.append(ann['segmentation'])
        results['ann_info']['labels'] = np.array(labels, dtype=np.int64)
        results['ann_info']['bboxes'] = np.array(boxes, dtype=np.float32)
        results['ann_info']['masks'] = masks
        results['img'] = img
        return results

    def __call__(self, results):
        img = results['img']
        orig_type = img.dtype
        anns = self._load_anns(results)
        # Apply InstaBoost with probability aug_ratio.
        if np.random.choice([0, 1], p=[(1 - self.aug_ratio), self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first.')
            # instaboost operates on uint8 images; restore dtype afterwards.
            (anns, img) = instaboost.get_new_data(anns, img.astype(np.uint8), self.cfg, background=None)
        return self._parse_anns(results, anns, img.astype(orig_type))

    def __repr__(self):
        return f'{self.__class__.__name__}(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
|
@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation with multiple scales and flipping.

    An example configuration:

    .. code-block::

        img_scale=[(1333, 400), (1333, 800)],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    The results are wrapped into lists with one entry per (scale, flip)
    combination, e.g. ``scale=[(1333, 400), (1333, 400), (1333, 800),
    (1333, 800)]`` paired with ``flip=[False, True, False, True]``.

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Images scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for
            resizing. Exactly one of ``img_scale``/``scale_factor`` must be
            given.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip directions, options are
            "horizontal", "vertical" and "diagonal". A list applies multiple
            flip augmentations. No effect when ``flip`` is False.
            Default: "horizontal".
    """

    def __init__(self, transforms, img_scale=None, scale_factor=None, flip=False, flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        assert (img_scale is None) ^ (scale_factor is None), 'Must have but only one variable can be set'
        if img_scale is not None:
            self.img_scale = img_scale if isinstance(img_scale, list) else [img_scale]
            self.scale_key = 'scale'
            assert mmcv.is_list_of(self.img_scale, tuple)
        else:
            # scale_factor mode reuses the img_scale attribute; only the key
            # written into results differs.
            self.img_scale = scale_factor if isinstance(scale_factor, list) else [scale_factor]
            self.scale_key = 'scale_factor'
        self.flip = flip
        self.flip_direction = flip_direction if isinstance(flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn('flip_direction has no effect when flip is set to False')
        if self.flip and not any(t['type'] == 'RandomFlip' for t in transforms):
            warnings.warn('flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Apply test-time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str, list]: The augmented data, where each value is wrapped
            into a list with one entry per (scale, flip) combination.
        """
        # The un-flipped variant always comes first.
        flip_settings = [(False, None)]
        if self.flip:
            flip_settings += [(True, direction) for direction in self.flip_direction]
        augmented = []
        for scale in self.img_scale:
            for do_flip, direction in flip_settings:
                branch = results.copy()
                branch[self.scale_key] = scale
                branch['flip'] = do_flip
                branch['flip_direction'] = direction
                augmented.append(self.transforms(branch))
        # Collate: one list per key, ordered like the augmentation loop.
        return {key: [sample[key] for sample in augmented] for key in augmented[0]}

    def __repr__(self):
        return (f'{self.__class__.__name__}(transforms={self.transforms}, '
                f'img_scale={self.img_scale}, flip={self.flip}, '
                f'flip_direction={self.flip_direction})')
|
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with a deterministic, seed-controlled shuffle.

    Unlike the torch base class, the shuffle seed is ``seed + epoch`` so all
    replicas produce the same global ordering for a given epoch, and padding
    repeats the whole index list instead of only the head.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # Normalize None to 0 so epoch + seed is always well-defined.
        self.seed = 0 if seed is None else seed

    def __iter__(self):
        total = len(self.dataset)
        if self.shuffle:
            # Seed with epoch + seed: identical across ranks, varies per epoch.
            gen = torch.Generator()
            gen.manual_seed(self.epoch + self.seed)
            indices = torch.randperm(total, generator=gen).tolist()
        else:
            indices = list(range(total))
        # Repeat the list until it covers total_size, then truncate, so every
        # rank receives exactly num_samples indices.
        repeats = math.ceil(self.total_size / len(indices))
        indices = (indices * repeats)[:self.total_size]
        assert len(indices) == self.total_size
        # Round-robin assignment: rank r takes positions r, r+R, r+2R, ...
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
|
class GroupSampler(Sampler):
    """Sampler that keeps each mini-batch within a single group.

    ``dataset.flag`` assigns every sample an integer group id (in MMDet this
    is typically the aspect-ratio group). Each group is padded to a multiple
    of ``samples_per_gpu`` so batches never mix groups.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        # group_sizes[g] == number of samples whose flag equals g.
        self.group_sizes = np.bincount(self.flag)
        # Each group's contribution is rounded up to whole batches.
        self.num_samples = 0
        for group_size in self.group_sizes:
            self.num_samples += int(np.ceil(group_size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        per_group = []
        for group_id, group_size in enumerate(self.group_sizes):
            if group_size == 0:
                continue
            members = np.where(self.flag == group_id)[0]
            assert len(members) == group_size
            np.random.shuffle(members)
            # Pad with random repeats so the group splits into whole batches.
            pad = int(np.ceil(group_size / self.samples_per_gpu)) * self.samples_per_gpu - len(members)
            members = np.concatenate([members, np.random.choice(members, pad)])
            per_group.append(members)
        flat = np.concatenate(per_group)
        # Shuffle at batch granularity so each contiguous run of
        # samples_per_gpu indices stays inside one group.
        batch_order = np.random.permutation(len(flat) // self.samples_per_gpu)
        flat = np.concatenate([flat[b * self.samples_per_gpu:(b + 1) * self.samples_per_gpu] for b in batch_order])
        flat = flat.astype(np.int64).tolist()
        assert len(flat) == self.num_samples
        return iter(flat)

    def __len__(self):
        return self.num_samples
|
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    Indices are grouped by ``dataset.flag`` so each contiguous run of
    ``samples_per_gpu`` indices (one mini-batch) comes from a single group.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling. Must expose a ``flag`` array
            with one integer group id per sample.
        samples_per_gpu (int): Number of samples per batch on each replica.
            Default: 1.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        seed (int, optional): random seed used to shuffle the sampler.
            This number should be identical across all
            processes in the distributed group. Default: 0.
    """

    def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None, seed=0):
        # Fall back to launcher-provided rank/world size when not given.
        (_rank, _num_replicas) = get_dist_info()
        if (num_replicas is None):
            num_replicas = _num_replicas
        if (rank is None):
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = (seed if (seed is not None) else 0)
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        # group_sizes[g] == number of samples whose flag equals g.
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        # Per-replica quota: each group is rounded up so it splits evenly
        # into whole batches of `samples_per_gpu` on all `num_replicas`.
        for (i, j) in enumerate(self.group_sizes):
            self.num_samples += (int(math.ceil((((self.group_sizes[i] * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu)
        self.total_size = (self.num_samples * self.num_replicas)

    def __iter__(self):
        """Deterministically shuffle within groups, then slice this rank's share."""
        # Same (epoch + seed) on every rank => identical global ordering
        # before the per-rank slice below.
        g = torch.Generator()
        g.manual_seed((self.epoch + self.seed))
        indices = []
        for (i, size) in enumerate(self.group_sizes):
            if (size > 0):
                indice = np.where((self.flag == i))[0]
                assert (len(indice) == size)
                # Shuffle this group's indices with the shared generator.
                indice = indice[list(torch.randperm(int(size), generator=g).numpy())].tolist()
                # Pad by repeating the shuffled group until its length is a
                # multiple of samples_per_gpu * num_replicas.
                extra = (((int(math.ceil((((size * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu) * self.num_replicas) - len(indice))
                tmp = indice.copy()
                for _ in range((extra // size)):
                    indice.extend(tmp)
                indice.extend(tmp[:(extra % size)])
                indices.extend(indice)
        assert (len(indices) == self.total_size)
        # Shuffle at batch granularity: permute whole chunks of
        # samples_per_gpu so each batch stays within one group.
        indices = [indices[j] for i in list(torch.randperm((len(indices) // self.samples_per_gpu), generator=g)) for j in range((i * self.samples_per_gpu), ((i + 1) * self.samples_per_gpu))]
        # Each rank takes a contiguous block of num_samples indices.
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)

    def __len__(self):
        # Number of samples yielded on this rank per epoch.
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the runner each epoch so shuffling differs across epochs.
        self.epoch = epoch
|
class InfiniteGroupBatchSampler(Sampler):
    """Similar to `BatchSampler` warping a `GroupSampler. It is designed for
    iteration-based runners like `IterBasedRunner` and yields a mini-batch of
    indices each time; all indices in a batch belong to the same group.

    The implementation logic is referred to
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py

    Args:
        dataset (object): The dataset, which must expose a ``flag`` array of
            per-sample group ids.
        batch_size (int): When model is :obj:`DistributedDataParallel`,
            it is the number of training samples on each GPU.
            When model is :obj:`DataParallel`, it is
            `num_gpus * samples_per_gpu`. Default: 1.
        world_size (int, optional): Number of processes participating in
            distributed training. Default: None.
        rank (int, optional): Rank of current process. Default: None.
        seed (int): Random seed. Default: 0.
        shuffle (bool): Whether to shuffle the indices of a dummy `epoch`;
            note that shuffling cannot guarantee sequential indices because
            all indices in a batch must come from one group. Default: True.
    """

    def __init__(self, dataset, batch_size=1, world_size=None, rank=None, seed=0, shuffle=True):
        # Fall back to launcher-provided rank/world size when not given.
        (_rank, _world_size) = get_dist_info()
        self.rank = _rank if rank is None else rank
        self.world_size = _world_size if world_size is None else world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = 0 if seed is None else seed
        self.shuffle = shuffle
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # One pending-index buffer per group; a batch is emitted whenever a
        # buffer reaches batch_size.
        self.buffer_per_group = {group: [] for group in range(len(self.group_sizes))}
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Infinitely yield a sequence of indices."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(self.size, generator=g).tolist()
            else:
                yield from torch.arange(self.size).tolist()

    def _indices_of_rank(self):
        """Slice the infinite indices by rank."""
        yield from itertools.islice(self._infinite_indices(), self.rank, None, self.world_size)

    def __iter__(self):
        for idx in self.indices:
            bucket = self.buffer_per_group[self.flag[idx]]
            bucket.append(idx)
            if len(bucket) == self.batch_size:
                # Yield a copy, then clear the shared buffer in place.
                yield bucket[:]
                del bucket[:]

    def __len__(self):
        """Length of base dataset."""
        return self.size

    def set_epoch(self, epoch):
        """Not supported in `IterationBased` runner."""
        raise NotImplementedError
|
class InfiniteBatchSampler(Sampler):
    """Similar to `BatchSampler` warping a `DistributedSampler. It is designed
    for iteration-based runners like `IterBasedRunner` and yields a mini-batch
    of indices each time.

    The implementation logic is referred to
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py

    Args:
        dataset (object): The dataset.
        batch_size (int): When model is :obj:`DistributedDataParallel`,
            it is the number of training samples on each GPU.
            When model is :obj:`DataParallel`, it is
            `num_gpus * samples_per_gpu`. Default: 1.
        world_size (int, optional): Number of processes participating in
            distributed training. Default: None.
        rank (int, optional): Rank of current process. Default: None.
        seed (int): Random seed. Default: 0.
        shuffle (bool): Whether shuffle the dataset or not. Default: True.
    """

    def __init__(self, dataset, batch_size=1, world_size=None, rank=None, seed=0, shuffle=True):
        # Fall back to launcher-provided rank/world size when not given.
        (_rank, _world_size) = get_dist_info()
        self.rank = _rank if rank is None else rank
        self.world_size = _world_size if world_size is None else world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = 0 if seed is None else seed
        self.shuffle = shuffle
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Infinitely yield a sequence of indices."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(self.size, generator=g).tolist()
            else:
                yield from torch.arange(self.size).tolist()

    def _indices_of_rank(self):
        """Slice the infinite indices by rank."""
        yield from itertools.islice(self._infinite_indices(), self.rank, None, self.world_size)

    def __iter__(self):
        pending = []
        for idx in self.indices:
            pending.append(idx)
            if len(pending) == self.batch_size:
                yield pending
                pending = []

    def __len__(self):
        """Length of base dataset."""
        return self.size

    def set_epoch(self, epoch):
        """Not supported in `IterationBased` runner."""
        raise NotImplementedError
|
def replace_ImageToTensor(pipelines):
    """Replace every ImageToTensor transform in a data pipeline with
    DefaultFormatBundle, which is normally useful in batch inference.

    The input configs are not modified; a deep copy is transformed and
    returned. ``MultiScaleFlipAug`` entries are processed recursively.

    Args:
        pipelines (list[dict]): Data pipeline configs.

    Returns:
        list: The new pipeline list with all ImageToTensor replaced by
        DefaultFormatBundle.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='RandomFlip'),
        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...            dict(type='Pad', size_divisor=32),
        ...            dict(type='ImageToTensor', keys=['img']),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='RandomFlip'),
        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...            dict(type='Pad', size_divisor=32),
        ...            dict(type='DefaultFormatBundle'),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> assert expected_pipelines == replace_ImageToTensor(pipelines)
    """
    converted = []
    for transform in copy.deepcopy(pipelines):
        if transform['type'] == 'MultiScaleFlipAug':
            # Recurse into the nested test-time-augmentation pipeline.
            assert 'transforms' in transform
            transform['transforms'] = replace_ImageToTensor(transform['transforms'])
            converted.append(transform)
        elif transform['type'] == 'ImageToTensor':
            warnings.warn('"ImageToTensor" pipeline is replaced by "DefaultFormatBundle" for batch inference. It is recommended to manually replace it in the test data pipeline in your config file.', UserWarning)
            converted.append({'type': 'DefaultFormatBundle'})
        else:
            converted.append(transform)
    return converted
|
def get_loading_pipeline(pipeline):
    """Only keep loading image and annotations related configuration.

    Args:
        pipeline (list[dict]): Data pipeline configs.

    Returns:
        list[dict]: The new pipeline list that keeps only the
        image-loading and annotation-loading configuration.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True),
        ...    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        ...    dict(type='RandomFlip', flip_ratio=0.5),
        ...    dict(type='Normalize', **img_norm_cfg),
        ...    dict(type='Pad', size_divisor=32),
        ...    dict(type='DefaultFormatBundle'),
        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True)
        ...    ]
        >>> assert expected_pipelines == get_loading_pipeline(pipelines)
    """
    loading_cfgs = []
    for transform_cfg in pipeline:
        # Resolve the registered class; unknown types are skipped.
        transform_cls = PIPELINES.get(transform_cfg['type'])
        if transform_cls is None:
            continue
        if transform_cls in (LoadImageFromFile, LoadAnnotations, LoadPanopticAnnotations):
            loading_cfgs.append(transform_cfg)
    assert len(loading_cfgs) == 2, 'The data pipeline in your config file must include loading image and annotations related pipeline.'
    return loading_cfgs
|
@HOOKS.register_module()
class NumClassCheckHook(Hook):
    """Hook that verifies each head's ``num_classes`` matches the dataset.

    Runs before every train and val epoch; raises an AssertionError on
    mismatch and logs a warning when the dataset declares no ``CLASSES``.
    """

    def _check_head(self, runner):
        """Check whether the `num_classes` in head matches the length of
        `CLASSES` in `dataset`.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        model = runner.model
        dataset = runner.data_loader.dataset
        if dataset.CLASSES is None:
            # Fixed message: the original read "...the {name} andcheck..."
            runner.logger.warning(
                f'Please set `CLASSES` in the {dataset.__class__.__name__} '
                'and check if it is consistent with the `num_classes` of head')
        else:
            # A bare string would silently iterate per-character; require a
            # real sequence of class names.
            assert type(dataset.CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} should be a '
                 f'tuple of str. Add comma if number of classes is 1 as '
                 f'CLASSES = ({dataset.CLASSES},)')
            for name, module in model.named_modules():
                # RPN-style and backbone modules use `num_classes` with a
                # different meaning, so they are exempt from the check.
                if hasattr(module, 'num_classes') and not isinstance(
                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
                    assert module.num_classes == len(dataset.CLASSES), \
                        (f'The `num_classes` ({module.num_classes}) in '
                         f'{module.__class__.__name__} of '
                         f'{model.__class__.__name__} does not match the '
                         f'length of `CLASSES` ({len(dataset.CLASSES)}) in '
                         f'{dataset.__class__.__name__}')

    def before_train_epoch(self, runner):
        """Check whether the training dataset is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)

    def before_val_epoch(self, runner):
        """Check whether the dataset in val epoch is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)
|
@DATASETS.register_module()
class VOCDataset(XMLDataset):
    """Pascal VOC dataset (XML annotation format).

    The dataset year (2007 vs 2012) is inferred from ``img_prefix`` and
    selects the mAP protocol used in :meth:`evaluate`.
    """

    # The canonical 20 VOC object categories.
    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    # Per-class visualization colors, aligned index-for-index with CLASSES.
    PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192), (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255), (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # The year decides the AP protocol in evaluate(): 2007 uses the
        # 11-point interpolated metric.
        if ('VOC2007' in self.img_prefix):
            self.year = 2007
        elif ('VOC2012' in self.img_prefix):
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'. A list must contain exactly one entry.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """
        # Only a single metric is supported per call.
        if (not isinstance(metric, str)):
            assert (len(metric) == 1)
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if (metric not in allowed_metrics):
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = ([iou_thr] if isinstance(iou_thr, float) else iou_thr)
        if (metric == 'mAP'):
            assert isinstance(iou_thrs, list)
            if (self.year == 2007):
                # VOC2007: 11-point interpolated AP.
                ds_name = 'voc07'
            else:
                # Otherwise pass the class names so eval_map uses its default
                # (area-under-curve) AP computation.
                ds_name = self.CLASSES
            mean_aps = []
            # NOTE: the loop variable deliberately shadows the `iou_thr`
            # parameter; each threshold is evaluated independently.
            for iou_thr in iou_thrs:
                print_log(f'''
{('-' * 15)}iou_thr: {iou_thr}{('-' * 15)}''')
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=ds_name, logger=logger, use_legacy_coordinate=True)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int((iou_thr * 100)):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = (sum(mean_aps) / len(mean_aps))
            # Put the aggregate mAP first for nicer logging order.
            eval_results.move_to_end('mAP', last=False)
        elif (metric == 'recall'):
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thrs, logger=logger, use_legacy_coordinate=True)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou_thr) in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou_thr}'] = recalls[(i, j)]
            if (recalls.shape[1] > 1):
                # Average recall across IoU thresholds per proposal budget.
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
|
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """

    CLASSES = ('face',)
    PALETTE = [(0, 255, 0)]

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of a file listing one image id per line.

        Returns:
            list[dict]: Annotation info (id, filename, width, height) for
            every listed image.
        """
        data_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
            root = ET.parse(xml_path).getroot()
            size_node = root.find('size')
            # Images are stored under a per-event subfolder recorded in the
            # XML's <folder> element.
            data_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(root.find('folder').text, f'{img_id}.jpg'),
                    width=int(size_node.find('width').text),
                    height=int(size_node.find('height').text)))
        return data_infos
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.