code stringlengths 17 6.64M |
|---|
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
    r"""Initialize module parameters with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.

    Args:
        a (int | float): the lower bound of the uniform distribution.
            Defaults to 0.
        b (int | float): the upper bound of the uniform distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, a=0, b=1, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                # ``override`` path: initialize this module unconditionally.
                uniform_init(m, self.a, self.b, self.bias)
                return
            # Otherwise only touch layers whose class name (or one of its
            # base class names) matches a configured layer name.
            names = {m.__class__.__name__, *_get_bases_name(m)}
            if names & set(self.layer):
                uniform_init(m, self.a, self.b, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: a={self.a}, b={self.b}, bias={self.bias}'
|
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
    """Initialize module parameters with the values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification - He, K. et al. (2015).
    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_

    Args:
        a (int | float): the negative slope of the rectifier used after this
            layer (only used with ``'leaky_relu'``). Defaults to 0.
        mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
            ``'fan_in'`` preserves the magnitude of the variance of the
            weights in the forward pass. Choosing ``'fan_out'`` preserves the
            magnitudes in the backwards pass. Defaults to ``'fan_out'``.
        nonlinearity (str): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'``.
            Defaults to 'relu'.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'`` or
            ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, a=0, mode='fan_out', nonlinearity='relu', distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                # ``override`` path: initialize this module unconditionally.
                kaiming_init(m, self.a, self.mode, self.nonlinearity, self.bias, self.distribution)
            else:
                # Only initialize layers whose class name (or a base class
                # name) matches a configured layer name.
                layername = m.__class__.__name__
                basesname = _get_bases_name(m)
                if len((set(self.layer) & set(([layername] + basesname)))):
                    kaiming_init(m, self.a, self.mode, self.nonlinearity, self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        # Bug fix: the original emitted 'distribution =<value>' with a stray
        # space before '=', inconsistent with every other field in this
        # string and with the other initializers' info strings.
        info = (f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, '
                f'nonlinearity={self.nonlinearity}, '
                f'distribution={self.distribution}, bias={self.bias}')
        return info
|
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style "Xavier" initialization.

    Acts as a :class:`KaimingInit` with ``a=1``, ``mode='fan_in'``,
    ``nonlinearity='leaky_relu'`` and a uniform distribution fixed
    (NOTE(review): presumably matching Caffe2's ``XavierFill`` — confirm
    against the Caffe2 source).

    Args:
        **kwargs: forwarded to :class:`KaimingInit` (e.g. ``bias``,
            ``bias_prob``, ``layer``).
    """

    def __init__(self, **kwargs):
        super().__init__(a=1, mode='fan_in', nonlinearity='leaky_relu', distribution='uniform', **kwargs)

    # The original defined a ``__call__`` that only delegated to
    # ``super().__call__(module)``; that dead override has been removed and
    # the inherited ``KaimingInit.__call__`` is used directly.
|
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
    """Initialize module by loading a pretrained model.

    Args:
        checkpoint (str): the checkpoint file of the pretrained model should
            be load.
        prefix (str, optional): the prefix of a sub-module in the pretrained
            model. it is for loading a part of the pretrained model to
            initialize. For example, if we would like to only load the
            backbone of a detector model, we can set ``prefix='backbone.'``.
            Defaults to None.
        map_location (str): map tensors into proper locations.
    """

    def __init__(self, checkpoint, prefix=None, map_location=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module):
        # Local import kept as in the original (not hoisted to module level).
        from mmcv.runner import _load_checkpoint_with_prefix, load_checkpoint, load_state_dict
        logger = get_logger('mmcv')
        if self.prefix is None:
            # Load the complete checkpoint into ``module``.
            print_log(f'load model from: {self.checkpoint}', logger=logger)
            load_checkpoint(module, self.checkpoint, map_location=self.map_location, strict=False, logger=logger)
        else:
            # Load only the sub-dict of the checkpoint under ``prefix``.
            print_log(f'load {self.prefix} in model from: {self.checkpoint}', logger=logger)
            state_dict = _load_checkpoint_with_prefix(self.prefix, self.checkpoint, map_location=self.map_location)
            load_state_dict(module, state_dict, strict=False, logger=logger)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: load from {self.checkpoint}'
|
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``.

    Args:
        module (nn.Module): module to initialize.
        cfg (dict): initializer config passed to the INITIALIZERS registry.
        wholemodule (bool): when True the initializer skips its layer-name
            filtering and initializes the whole module (used by ``override``).
    """
    initializer = build_from_cfg(cfg, INITIALIZERS)
    # wholemodule flag is a runtime attribute consumed by the initializer's
    # __call__; it is not part of the registered config.
    initializer.wholemodule = wholemodule
    initializer(module)
|
def _initialize_override(module, override, cfg):
if (not isinstance(override, (dict, list))):
raise TypeError(f'override must be a dict or a list of dict, but got {type(override)}')
override = ([override] if isinstance(override, dict) else override)
for override_ in override:
cp_override = copy.deepcopy(override_)
name = cp_override.pop('name', None)
if (name is None):
raise ValueError(f'`override` must contain the key "name",but got {cp_override}')
if (not cp_override):
cp_override.update(cfg)
elif ('type' not in cp_override.keys()):
raise ValueError(f'`override` need "type" key, but got {cp_override}')
if hasattr(module, name):
_initialize(getattr(module, name), cp_override, wholemodule=True)
else:
raise RuntimeError(f'module did not have attribute {name}, but init_cfg is {cp_override}.')
|
def initialize(module, init_cfg):
    r"""Initialize a module.

    Args:
        module (``torch.nn.Module``): the module will be initialized.
        init_cfg (dict | list[dict]): initialization configuration dict to
            define initializer. OpenMMLab has implemented 6 initializers
            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
            ``Kaiming``, and ``Pretrained``.

    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
        >>> initialize(module, init_cfg)

        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1, 2))
        >>> # define key ``'layer'`` for initializing layer with different
        >>> # configuration
        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
                dict(type='Constant', layer='Linear', val=2)]
        >>> initialize(module, init_cfg)

        >>> # define key ``'override'`` to initialize some specific part in
        >>> # module
        >>> class FooNet(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.feat = nn.Conv2d(3, 16, 3)
        >>>         self.reg = nn.Conv2d(16, 10, 3)
        >>>         self.cls = nn.Conv2d(16, 5, 3)
        >>> model = FooNet()
        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
        >>> initialize(model, init_cfg)

        >>> model = ResNet(depth=50)
        >>> # Initialize weights with the pretrained model.
        >>> init_cfg = dict(type='Pretrained',
                checkpoint='torchvision://resnet50')
        >>> initialize(model, init_cfg)

        >>> # Initialize weights of a sub-module with the specific part of
        >>> # a pretrained model by using "prefix".
        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/' \
        >>>     'retinanet_r50_fpn_1x_coco/' \
        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
        >>> init_cfg = dict(type='Pretrained',
                checkpoint=url, prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        raise TypeError(f'init_cfg must be a dict or a list of dict, but got {type(init_cfg)}')
    if isinstance(init_cfg, dict):
        init_cfg = [init_cfg]
    for cfg in init_cfg:
        # Deep-copy so popping keys never mutates the caller's config.
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)
        if override is not None:
            # ``layer`` must not leak into the override configs: overrides
            # always initialize the whole named sub-module.
            cp_cfg.pop('layer', None)
            _initialize_override(module, override, cp_cfg)
        # (Dead ``else: pass`` branch from the original removed.)
|
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor:
def norm_cdf(x):
return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0)
if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))):
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2)
with torch.no_grad():
lower = norm_cdf(((a - mean) / std))
upper = norm_cdf(((b - mean) / std))
tensor.uniform_(((2 * lower) - 1), ((2 * upper) - 1))
tensor.erfinv_()
tensor.mul_((std * math.sqrt(2.0)))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
|
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor:
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Modified from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.
    """
    # Thin public wrapper; the heavy lifting runs without autograd tracking.
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
|
def conv3x3(in_planes, out_planes, dilation=1):
    """Build a 3x3 Conv2d whose padding equals its dilation (keeps the
    spatial size unchanged at stride 1)."""
    padding = dilation
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=padding,
        dilation=dilation)
|
def make_vgg_layer(inplanes, planes, num_blocks, dilation=1, with_bn=False, ceil_mode=False):
    """Build one VGG stage as a flat list of modules.

    The stage is ``num_blocks`` repetitions of conv3x3 (+ optional
    BatchNorm) + ReLU, terminated by a single 2x2 max-pool.
    """
    modules = []
    in_channels = inplanes
    for _ in range(num_blocks):
        block = [conv3x3(in_channels, planes, dilation)]
        if with_bn:
            block.append(nn.BatchNorm2d(planes))
        block.append(nn.ReLU(inplace=True))
        modules.extend(block)
        # After the first conv, subsequent convs operate on ``planes``.
        in_channels = planes
    modules.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return modules
|
class VGG(nn.Module):
    """VGG backbone.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_bn (bool): Use BatchNorm or not.
        num_classes (int): number of classes for classification.
        num_stages (int): VGG stages, normally 5.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
    """
    # Number of conv blocks in each of the 5 stages, keyed by depth.
    arch_settings = {11: (1, 1, 2, 2, 2), 13: (2, 2, 2, 2, 2), 16: (2, 2, 3, 3, 3), 19: (2, 2, 4, 4, 4)}
    def __init__(self, depth, with_bn=False, num_classes=(- 1), num_stages=5, dilations=(1, 1, 1, 1, 1), out_indices=(0, 1, 2, 3, 4), frozen_stages=(- 1), bn_eval=True, bn_frozen=False, ceil_mode=False, with_last_pool=True):
        super(VGG, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for vgg')
        assert ((num_stages >= 1) and (num_stages <= 5))
        stage_blocks = self.arch_settings[depth]
        # Keep only the first ``num_stages`` stages.
        self.stage_blocks = stage_blocks[:num_stages]
        assert (len(dilations) == num_stages)
        assert (max(out_indices) <= num_stages)
        self.num_classes = num_classes
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        # RGB input: 3 channels feed the first stage.
        self.inplanes = 3
        start_idx = 0
        vgg_layers = []
        # ``range_sub_modules[i]`` holds the [start, end) indices of stage i
        # inside the flat ``features`` Sequential built below.
        self.range_sub_modules = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            # Each block is conv + ReLU (+ BN when with_bn), and every stage
            # ends with one max-pool: blocks * (2 + with_bn) + 1 modules.
            num_modules = ((num_blocks * (2 + with_bn)) + 1)
            end_idx = (start_idx + num_modules)
            dilation = dilations[i]
            # Channel width doubles per stage (64, 128, 256, 512), capped at 512.
            planes = ((64 * (2 ** i)) if (i < 4) else 512)
            vgg_layer = make_vgg_layer(self.inplanes, planes, num_blocks, dilation=dilation, with_bn=with_bn, ceil_mode=ceil_mode)
            vgg_layers.extend(vgg_layer)
            self.inplanes = planes
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if (not with_last_pool):
            # Drop the trailing max-pool and shrink the last stage's range.
            vgg_layers.pop((- 1))
            self.range_sub_modules[(- 1)][1] -= 1
        self.module_name = 'features'
        self.add_module(self.module_name, nn.Sequential(*vgg_layers))
        if (self.num_classes > 0):
            # Classification head. The 512*7*7 input implies a 7x7 final
            # feature map (NOTE(review): i.e. presumably 224x224 input with
            # all five pools -- confirm against the intended input size).
            self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes))
    def init_weights(self, pretrained=None):
        """Initialize weights from a checkpoint path, or in place.

        Args:
            pretrained (str, optional): checkpoint path/URL to load. If
                None, conv/BN/linear layers are initialized with
                Kaiming/constant/normal schemes respectively.

        Raises:
            TypeError: if ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Run the backbone stage by stage; collect outputs of the stages in
        ``out_indices`` (plus classifier logits when a head exists)."""
        outs = []
        vgg_layers = getattr(self, self.module_name)
        for i in range(len(self.stage_blocks)):
            # Run only the sub-modules that belong to stage i.
            for j in range(*self.range_sub_modules[i]):
                vgg_layer = vgg_layers[j]
                x = vgg_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (self.num_classes > 0):
            # Flatten to (batch, features) before the linear head.
            x = x.view(x.size(0), (- 1))
            x = self.classifier(x)
            outs.append(x)
        # A single output is unwrapped for caller convenience.
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)
    def train(self, mode=True):
        """Switch train/eval mode while honoring BN-freezing and
        frozen-stage settings."""
        super(VGG, self).train(mode)
        if self.bn_eval:
            # Keep all BN layers in eval mode so running stats stay fixed.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        # Additionally freeze BN affine parameters.
                        for params in m.parameters():
                            params.requires_grad = False
        vgg_layers = getattr(self, self.module_name)
        if (mode and (self.frozen_stages >= 0)):
            # Freeze the first ``frozen_stages`` stages entirely.
            for i in range(self.frozen_stages):
                for j in range(*self.range_sub_modules[i]):
                    mod = vgg_layers[j]
                    mod.eval()
                    for param in mod.parameters():
                        param.requires_grad = False
|
def single_gpu_test(model, data_loader):
    """Test model with a single gpu.

    This method tests model with a single gpu and displays test progress bar.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    for data in data_loader:
        with torch.no_grad():
            batch_results = model(return_loss=False, **data)
        results.extend(batch_results)
        # Advance the bar once per sample in this batch.
        for _ in range(len(batch_results)):
            prog_bar.update()
    return results
|
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    ``gpu_collect=True``, it encodes results to gpu tensors and use gpu
    communication for results collection. On cpu mode it saves the results on
    different gpus to ``tmpdir`` and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    (rank, world_size) = get_dist_info()
    if (rank == 0):
        # Only rank 0 displays progress for the whole (sharded) dataset.
        prog_bar = mmcv.ProgressBar(len(dataset))
        # NOTE(review): presumably a grace period so the other workers start
        # before rank 0 begins printing progress -- confirm the intent.
        time.sleep(2)
    for (i, data) in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)
        results.extend(result)
        if (rank == 0):
            batch_size = len(result)
            # Every rank processes ~batch_size samples per step, so advance
            # the bar by the global batch, clamped to the dataset length.
            batch_size_all = (batch_size * world_size)
            if ((batch_size_all + prog_bar.completed) > len(dataset)):
                batch_size_all = (len(dataset) - prog_bar.completed)
            for _ in range(batch_size_all):
                prog_bar.update()
    # Merge per-rank partial results. Rank 0 receives the merged list;
    # the other ranks receive None (see the collect_results_* helpers).
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
|
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results under cpu mode.

    On cpu mode, this function will save the results on different gpus to
    ``tmpdir`` and collect them by the rank 0 worker.

    Args:
        result_part (list): Result list containing result parts
            to be collected.
        size (int): Size of the results, commonly equal to length of
            the results.
        tmpdir (str | None): temporal directory for collected results to
            store. If set to None, it will create a random temporal directory
            for it.

    Returns:
        list: The collected results on rank 0; ``None`` on all other ranks.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        # Broadcast a rank-0-created tmpdir name to every rank through a
        # fixed-length uint8 CUDA tensor padded with spaces (ASCII 32).
        MAX_LEN = 512
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        # rstrip() removes the space padding, recovering the directory name.
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # Each rank dumps its own part, then all ranks wait at the barrier so
    # every part file exists before rank 0 starts reading.
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    dist.barrier()
    if (rank != 0):
        return None
    else:
        # Rank 0 loads every rank's part (empty parts are skipped) ...
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_result = mmcv.load(part_file)
            if part_result:
                part_list.append(part_result)
        # ... and interleaves them sample-by-sample. NOTE(review): this
        # assumes samples were sharded round-robin across ranks -- confirm
        # against the data loader's sampler.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Truncate to ``size``: presumably the sampler pads each rank to an
        # equal length -- confirm.
        ordered_results = ordered_results[:size]
        # Clean up the temporary directory once results are merged.
        shutil.rmtree(tmpdir)
        return ordered_results
|
def collect_results_gpu(result_part, size):
    """Collect results under gpu mode.

    On gpu mode, this function will encode results to gpu tensors and use gpu
    communication for results collection.

    Args:
        result_part (list): Result list containing result parts
            to be collected.
        size (int): Size of the results, commonly equal to length of
            the results.

    Returns:
        list: The collected results on rank 0; ``None`` on all other ranks.
    """
    (rank, world_size) = get_dist_info()
    # Serialize this rank's results into a uint8 CUDA tensor.
    part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # Gather every rank's payload length so all payloads can be padded to a
    # common size (all_gather requires equally-shaped tensors).
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    dist.all_gather(part_recv_list, part_send)
    if (rank == 0):
        part_list = []
        for (recv, shape) in zip(part_recv_list, shape_list):
            # Deserialize only the valid (unpadded) prefix of each payload;
            # empty parts are skipped.
            part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
            if part_result:
                part_list.append(part_result)
        # Interleave per-rank parts and truncate to ``size``. NOTE(review):
        # assumes round-robin sharding with sampler padding -- confirm
        # against the data loader's sampler.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        ordered_results = ordered_results[:size]
        return ordered_results
    # Non-zero ranks fall through and implicitly return None.
|
class BaseStorageBackend(metaclass=ABCMeta):
    """Abstract class of storage backends.

    All backends need to implement two apis: ``get()`` and ``get_text()``.
    ``get()`` reads the file as a byte stream and ``get_text()`` reads the
    file as texts.
    """

    # Whether this backend permits following symlinks; concrete backends
    # may override the class attribute.
    _allow_symlink = False

    @property
    def name(self):
        """str: Name of the backend (the concrete class name)."""
        return type(self).__name__

    @property
    def allow_symlink(self):
        """bool: Whether symlinks are allowed by this backend."""
        return self._allow_symlink

    @abstractmethod
    def get(self, filepath):
        pass

    @abstractmethod
    def get_text(self, filepath):
        pass
|
class CephBackend(BaseStorageBackend):
    """Ceph storage backend (for internal use).

    Args:
        path_mapping (dict|None): path mapping dict from local path to Petrel
            path. When ``path_mapping={'src': 'dst'}``, ``src`` in
            ``filepath`` will be replaced by ``dst``. Default: None.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
    """

    def __init__(self, path_mapping=None):
        try:
            import ceph
        except ImportError:
            raise ImportError('Please install ceph to enable CephBackend.')
        warnings.warn('CephBackend will be deprecated, please use PetrelBackend instead', DeprecationWarning)
        self._client = ceph.S3Client()
        assert (isinstance(path_mapping, dict) or (path_mapping is None))
        self.path_mapping = path_mapping

    def get(self, filepath):
        filepath = str(filepath)
        # Rewrite configured path prefixes before hitting the client.
        if self.path_mapping is not None:
            for (src, dst) in self.path_mapping.items():
                filepath = filepath.replace(src, dst)
        # memoryview lets callers slice the payload without copying.
        return memoryview(self._client.Get(filepath))

    def get_text(self, filepath, encoding=None):
        raise NotImplementedError
|
class PetrelBackend(BaseStorageBackend):
    """Petrel storage backend (for internal use).

    PetrelBackend supports reading and writing data to multiple clusters.
    If the file path contains the cluster name, PetrelBackend will read data
    from specified cluster or write data to it. Otherwise, PetrelBackend will
    access the default cluster.

    Args:
        path_mapping (dict, optional): Path mapping dict from local path to
            Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in
            ``filepath`` will be replaced by ``dst``. Default: None.
        enable_mc (bool, optional): Whether to enable memcached support.
            Default: True.

    Examples:
        >>> filepath1 = 's3://path/of/file'
        >>> filepath2 = 'cluster-name:s3://path/of/file'
        >>> client = PetrelBackend()
        >>> client.get(filepath1)  # get data from default cluster
        >>> client.get(filepath2)  # get data from 'cluster-name' cluster
    """
    def __init__(self, path_mapping: Optional[dict]=None, enable_mc: bool=True):
        try:
            from petrel_client import client
        except ImportError:
            raise ImportError('Please install petrel_client to enable PetrelBackend.')
        self._client = client.Client(enable_mc=enable_mc)
        assert (isinstance(path_mapping, dict) or (path_mapping is None))
        self.path_mapping = path_mapping
    def _map_path(self, filepath: Union[(str, Path)]) -> str:
        """Map ``filepath`` to a string path whose prefix will be replaced by
        :attr:`self.path_mapping`.

        Args:
            filepath (str): Path to be mapped.
        """
        filepath = str(filepath)
        if (self.path_mapping is not None):
            # Each configured prefix substitution is applied in turn.
            for (k, v) in self.path_mapping.items():
                filepath = filepath.replace(k, v)
        return filepath
    def _format_path(self, filepath: str) -> str:
        """Convert a ``filepath`` to standard format of petrel oss.

        If the ``filepath`` is concatenated by ``os.path.join``, in a Windows
        environment, the ``filepath`` will be the format of
        's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the
        above ``filepath`` will be converted to 's3://bucket_name/image.jpg'.

        Args:
            filepath (str): Path to be formatted.
        """
        # Collapse one-or-more backslashes into a single forward slash.
        return re.sub('\\\\+', '/', filepath)
    def get(self, filepath: Union[(str, Path)]) -> memoryview:
        """Read data from a given ``filepath`` with 'rb' mode.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            memoryview: A memory view of expected bytes object to avoid
                copying. The memoryview object can be converted to bytes by
                ``value_buf.tobytes()``.
        """
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        value = self._client.Get(filepath)
        value_buf = memoryview(value)
        return value_buf
    def get_text(self, filepath: Union[(str, Path)], encoding: str='utf-8') -> str:
        """Read data from a given ``filepath`` with 'r' mode.

        Args:
            filepath (str or Path): Path to read data.
            encoding (str): The encoding format used to open the ``filepath``.
                Default: 'utf-8'.

        Returns:
            str: Expected text reading from ``filepath``.
        """
        return str(self.get(filepath), encoding=encoding)
    def put(self, obj: bytes, filepath: Union[(str, Path)]) -> None:
        """Save data to a given ``filepath``.

        Args:
            obj (bytes): Data to be saved.
            filepath (str or Path): Path to write data.
        """
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        self._client.put(filepath, obj)
    def put_text(self, obj: str, filepath: Union[(str, Path)], encoding: str='utf-8') -> None:
        """Save data to a given ``filepath``.

        Args:
            obj (str): Data to be written.
            filepath (str or Path): Path to write data.
            encoding (str): The encoding format used to encode the ``obj``.
                Default: 'utf-8'.
        """
        self.put(bytes(obj, encoding=encoding), filepath)
    def remove(self, filepath: Union[(str, Path)]) -> None:
        """Remove a file.

        Args:
            filepath (str or Path): Path to be removed.
        """
        # Older Petrel SDKs lack ``delete``; fail loudly instead of no-op.
        if (not has_method(self._client, 'delete')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `delete` method, please use a higher version or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        self._client.delete(filepath)
    def exists(self, filepath: Union[(str, Path)]) -> bool:
        """Check whether a file path exists.

        Args:
            filepath (str or Path): Path to be checked whether exists.

        Returns:
            bool: Return ``True`` if ``filepath`` exists, ``False``
                otherwise.
        """
        if (not (has_method(self._client, 'contains') and has_method(self._client, 'isdir'))):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `contains` and `isdir` methods, please use a higherversion or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        # Existing as either a file or a (logical) directory counts.
        return (self._client.contains(filepath) or self._client.isdir(filepath))
    def isdir(self, filepath: Union[(str, Path)]) -> bool:
        """Check whether a file path is a directory.

        Args:
            filepath (str or Path): Path to be checked whether it is a
                directory.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a directory,
                ``False`` otherwise.
        """
        if (not has_method(self._client, 'isdir')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `isdir` method, please use a higher version or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        return self._client.isdir(filepath)
    def isfile(self, filepath: Union[(str, Path)]) -> bool:
        """Check whether a file path is a file.

        Args:
            filepath (str or Path): Path to be checked whether it is a file.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a file, ``False``
                otherwise.
        """
        if (not has_method(self._client, 'contains')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `contains` method, please use a higher version or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        return self._client.contains(filepath)
    def join_path(self, filepath: Union[(str, Path)], *filepaths: Union[(str, Path)]) -> str:
        """Concatenate all file paths.

        Args:
            filepath (str or Path): Path to be concatenated.

        Returns:
            str: The result after concatenation.
        """
        filepath = self._format_path(self._map_path(filepath))
        # Strip a trailing '/' so joining never produces a double slash.
        if filepath.endswith('/'):
            filepath = filepath[:(- 1)]
        formatted_paths = [filepath]
        for path in filepaths:
            formatted_paths.append(self._format_path(self._map_path(path)))
        return '/'.join(formatted_paths)
    @contextmanager
    def get_local_path(self, filepath: Union[(str, Path)]) -> Iterable[str]:
        """Download a file from ``filepath`` and return a temporary path.

        ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`.
        It can be called with ``with`` statement, and when exists from the
        ``with`` statement, the temporary path will be released.

        Args:
            filepath (str | Path): Download a file from ``filepath``.

        Examples:
            >>> client = PetrelBackend()
            >>> # After existing from the ``with`` clause,
            >>> # the path will be removed
            >>> with client.get_local_path('s3://path/of/your/file') as path:
            ...     # do something here

        Yields:
            Iterable[str]: Only yield one temporary path.
        """
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        assert self.isfile(filepath)
        try:
            # delete=False so the file survives close(); it is removed
            # explicitly in the finally block once the caller is done.
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(self.get(filepath))
            f.close()
            (yield f.name)
        finally:
            os.remove(f.name)
    def list_dir_or_file(self, dir_path: Union[(str, Path)], list_dir: bool=True, list_file: bool=True, suffix: Optional[Union[(str, Tuple[str])]]=None, recursive: bool=False) -> Iterator[str]:
        """Scan a directory to find the interested directories or files in
        arbitrary order.

        Note:
            Petrel has no concept of directories but it simulates the
            directory hierarchy in the filesystem through public prefixes.
            In addition, if the returned path ends with '/', it means the
            path is a public prefix which is a logical directory.

        Note:
            :meth:`list_dir_or_file` returns the path relative to
            ``dir_path``. In addition, the returned path of directory will
            not contains the suffix '/' which is consistent with other
            backends.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix
                that we are interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        if (not has_method(self._client, 'list')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `list` method, please use a higher version or dev branch instead.')
        dir_path = self._map_path(dir_path)
        dir_path = self._format_path(dir_path)
        if (list_dir and (suffix is not None)):
            raise TypeError('`list_dir` should be False when `suffix` is not None')
        if ((suffix is not None) and (not isinstance(suffix, (str, tuple)))):
            raise TypeError('`suffix` must be a string or tuple of strings')
        # Ensure a trailing '/' so relative paths below are computed
        # correctly against ``root``.
        if (not dir_path.endswith('/')):
            dir_path += '/'
        root = dir_path
        def _list_dir_or_file(dir_path, list_dir, list_file, suffix, recursive):
            for path in self._client.list(dir_path):
                # Entries ending with '/' are public prefixes, i.e. logical
                # directories; everything else is a file.
                if path.endswith('/'):
                    next_dir_path = self.join_path(dir_path, path)
                    if list_dir:
                        # Strip the root prefix and the trailing '/'.
                        rel_dir = next_dir_path[len(root):(- 1)]
                        (yield rel_dir)
                    if recursive:
                        (yield from _list_dir_or_file(next_dir_path, list_dir, list_file, suffix, recursive))
                else:
                    absolute_path = self.join_path(dir_path, path)
                    rel_path = absolute_path[len(root):]
                    if (((suffix is None) or rel_path.endswith(suffix)) and list_file):
                        (yield rel_path)
        return _list_dir_or_file(dir_path, list_dir, list_file, suffix, recursive)
|
class MemcachedBackend(BaseStorageBackend):
    """Memcached storage backend.

    Attributes:
        server_list_cfg (str): Config file for memcached server list.
        client_cfg (str): Config file for memcached client.
        sys_path (str | None): Additional path to be appended to `sys.path`.
            Default: None.
    """

    def __init__(self, server_list_cfg, client_cfg, sys_path=None):
        if sys_path is not None:
            # The ``mc`` package may live outside the default search path.
            import sys
            sys.path.append(sys_path)
        try:
            import mc
        except ImportError:
            raise ImportError('Please install memcached to enable MemcachedBackend.')
        self.server_list_cfg = server_list_cfg
        self.client_cfg = client_cfg
        self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg)
        # Reusable output buffer for retrieved values.
        self._mc_buffer = mc.pyvector()

    def get(self, filepath):
        import mc
        # The client writes the value into the shared buffer in place.
        self._client.Get(str(filepath), self._mc_buffer)
        return mc.ConvertBuffer(self._mc_buffer)

    def get_text(self, filepath, encoding=None):
        raise NotImplementedError
|
class LmdbBackend(BaseStorageBackend):
    """Lmdb storage backend.

    Args:
        db_path (str): Lmdb database path.
        readonly (bool, optional): Lmdb environment parameter. If True,
            disallow any write operations. Default: True.
        lock (bool, optional): Lmdb environment parameter. If False, when
            concurrent access occurs, do not lock the database.
            Default: False.
        readahead (bool, optional): Lmdb environment parameter. If False,
            disable the OS filesystem readahead mechanism, which may improve
            random read performance when a database is larger than RAM.
            Default: False.

    Attributes:
        db_path (str): Lmdb database path.
    """

    def __init__(self, db_path, readonly=True, lock=False, readahead=False, **kwargs):
        try:
            import lmdb
        except ImportError:
            raise ImportError('Please install lmdb to enable LmdbBackend.')
        self.db_path = str(db_path)
        # Extra keyword arguments are forwarded to lmdb.open() untouched.
        self._client = lmdb.open(self.db_path, readonly=readonly, lock=lock, readahead=readahead, **kwargs)

    def get(self, filepath):
        """Get values according to the filepath.

        Args:
            filepath (str | obj:`Path`): Here, filepath is the lmdb key.
        """
        key = str(filepath).encode('ascii')
        with self._client.begin(write=False) as txn:
            return txn.get(key)

    def get_text(self, filepath, encoding=None):
        raise NotImplementedError
|
class HardDiskBackend(BaseStorageBackend):
    """Raw hard disks storage backend."""

    _allow_symlink = True

    def get(self, filepath: Union[str, Path]) -> bytes:
        """Read raw bytes from ``filepath``.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes: The full file content.
        """
        with open(filepath, 'rb') as f:
            return f.read()

    def get_text(self, filepath: Union[str, Path], encoding: str = 'utf-8') -> str:
        """Read text from ``filepath``.

        Args:
            filepath (str or Path): Path to read data.
            encoding (str): The encoding used to open ``filepath``.
                Default: 'utf-8'.

        Returns:
            str: The full file content as text.
        """
        with open(filepath, 'r', encoding=encoding) as f:
            return f.read()

    def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
        """Write bytes to ``filepath``, creating its parent directory first.

        Args:
            obj (bytes): Data to be written.
            filepath (str or Path): Path to write data.
        """
        mmcv.mkdir_or_exist(osp.dirname(filepath))
        with open(filepath, 'wb') as f:
            f.write(obj)

    def put_text(self, obj: str, filepath: Union[str, Path], encoding: str = 'utf-8') -> None:
        """Write text to ``filepath``, creating its parent directory first.

        Args:
            obj (str): Data to be written.
            filepath (str or Path): Path to write data.
            encoding (str): The encoding used to open ``filepath``.
                Default: 'utf-8'.
        """
        mmcv.mkdir_or_exist(osp.dirname(filepath))
        with open(filepath, 'w', encoding=encoding) as f:
            f.write(obj)

    def remove(self, filepath: Union[str, Path]) -> None:
        """Remove the file at ``filepath``."""
        os.remove(filepath)

    def exists(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` exists, ``False`` otherwise."""
        return osp.exists(filepath)

    def isdir(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` points to a directory."""
        return osp.isdir(filepath)

    def isfile(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` points to a regular file."""
        return osp.isfile(filepath)

    def join_path(self, filepath: Union[str, Path], *filepaths: Union[str, Path]) -> str:
        """Join ``filepath`` with any number of further path components.

        Returns:
            str: The concatenated path.
        """
        return osp.join(filepath, *filepaths)

    @contextmanager
    def get_local_path(self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
        """Only for unified API; yields ``filepath`` unchanged."""
        yield filepath

    def list_dir_or_file(self, dir_path: Union[str, Path], list_dir: bool = True, list_file: bool = True, suffix: Optional[Union[str, Tuple[str]]] = None, recursive: bool = False) -> Iterator[str]:
        """Scan a directory and yield interesting entries in arbitrary order.

        Note:
            :meth:`list_dir_or_file` returns the path relative to
            ``dir_path``.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix that we are
                interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        if list_dir and (suffix is not None):
            raise TypeError('`suffix` should be None when `list_dir` is True')
        if (suffix is not None) and (not isinstance(suffix, (str, tuple))):
            raise TypeError('`suffix` must be a string or tuple of strings')
        root = dir_path

        def _scan(cur_path):
            # Closure over root/list_dir/list_file/suffix/recursive.
            for entry in os.scandir(cur_path):
                if (not entry.name.startswith('.')) and entry.is_file():
                    rel_path = osp.relpath(entry.path, root)
                    if list_file and ((suffix is None) or rel_path.endswith(suffix)):
                        yield rel_path
                elif osp.isdir(entry.path):
                    # NOTE: hidden directories fall through to this branch
                    # and are listed/recursed, matching the original logic.
                    if list_dir:
                        yield osp.relpath(entry.path, root)
                    if recursive:
                        yield from _scan(entry.path)

        return _scan(dir_path)
|
class HTTPBackend(BaseStorageBackend):
    """HTTP and HTTPS storage backend."""

    def get(self, filepath):
        # ``urlopen`` comes from the module-level urllib import; the whole
        # response body is read into memory and returned as bytes.
        value_buf = urlopen(filepath).read()
        return value_buf

    def get_text(self, filepath, encoding='utf-8'):
        # Same as ``get`` but decodes the payload to ``str``.
        value_buf = urlopen(filepath).read()
        return value_buf.decode(encoding)

    @contextmanager
    def get_local_path(self, filepath: str) -> Iterable[str]:
        """Download a file from ``filepath`` into a local temporary file.

        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
        It can be called with a ``with`` statement, and when exiting the
        ``with`` block, the temporary path will be released.

        Args:
            filepath (str): Download a file from ``filepath``.

        Examples:
            >>> client = HTTPBackend()
            >>> # After exiting the ``with`` clause,
            >>> # the path will be removed
            >>> with client.get_local_path('http://path/of/your/file') as path:
            ...     # do something here
        """
        try:
            # delete=False so the file survives close(); it is removed
            # explicitly in the ``finally`` clause after the caller is done.
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(self.get(filepath))
            f.close()
            (yield f.name)
        finally:
            os.remove(f.name)
|
class FileClient():
    """A general file client to access files in different backends.

    The client loads a file or text in a specified backend from its path
    and returns it as a binary or text file. There are two ways to choose a
    backend, the name of backend and the prefix of path. Although both of
    them can be used to choose a storage backend, ``backend`` has a higher
    priority that is if they are all set, the storage backend will be chosen
    by the backend argument. If they are all `None`, the disk backend will
    be chosen. Note that It can also register other backend accessor with a
    given name, prefixes, and backend class. In addition, We use the
    singleton pattern to avoid repeated object creation. If the arguments
    are the same, the same object will be returned.

    Args:
        backend (str, optional): The storage backend type. Options are
            "disk", "ceph", "memcached", "lmdb", "http" and "petrel".
            Default: None.
        prefix (str, optional): The prefix of the registered storage
            backend. Options are "s3", "http", "https". Default: None.

    Examples:
        >>> # only set backend
        >>> file_client = FileClient(backend='petrel')
        >>> # only set prefix
        >>> file_client = FileClient(prefix='s3')
        >>> # set both backend and prefix but use backend to choose client
        >>> file_client = FileClient(backend='petrel', prefix='s3')
        >>> # if the arguments are the same, the same object is returned
        >>> file_client1 = FileClient(backend='petrel')
        >>> file_client1 is file_client
        True

    Attributes:
        client (:obj:`BaseStorageBackend`): The backend object.
    """

    # Name -> backend class registry; extended via ``register_backend``.
    _backends = {'disk': HardDiskBackend, 'ceph': CephBackend, 'memcached': MemcachedBackend, 'lmdb': LmdbBackend, 'petrel': PetrelBackend, 'http': HTTPBackend}
    # Backend names that were force-overridden; instances for these are
    # never served from the ``_instances`` cache (see ``__new__``).
    _overridden_backends = set()
    # Path prefix (e.g. 's3') -> backend class registry.
    _prefix_to_backends = {'s3': PetrelBackend, 'http': HTTPBackend, 'https': HTTPBackend}
    # Prefixes that were force-overridden; same cache-bypass rule as above.
    _overridden_prefixes = set()
    # Singleton cache keyed by (backend, prefix, **kwargs) string.
    _instances = {}

    def __new__(cls, backend=None, prefix=None, **kwargs):
        # Fall back to the local-disk backend when nothing is specified.
        if ((backend is None) and (prefix is None)):
            backend = 'disk'
        if ((backend is not None) and (backend not in cls._backends)):
            raise ValueError(f'Backend {backend} is not supported. Currently supported ones are {list(cls._backends.keys())}')
        if ((prefix is not None) and (prefix not in cls._prefix_to_backends)):
            raise ValueError(f'prefix {prefix} is not supported. Currently supported ones are {list(cls._prefix_to_backends.keys())}')
        # Build the singleton cache key from all constructor arguments.
        arg_key = f'{backend}:{prefix}'
        for (key, value) in kwargs.items():
            arg_key += f':{key}:{value}'
        # Reuse a cached instance unless the chosen backend/prefix has been
        # force-overridden since it was cached.
        if ((arg_key in cls._instances) and (backend not in cls._overridden_backends) and (prefix not in cls._overridden_prefixes)):
            _instance = cls._instances[arg_key]
        else:
            _instance = super().__new__(cls)
            # ``backend`` takes priority over ``prefix`` when both are set.
            if (backend is not None):
                _instance.client = cls._backends[backend](**kwargs)
            else:
                _instance.client = cls._prefix_to_backends[prefix](**kwargs)
            cls._instances[arg_key] = _instance
        return _instance

    @property
    def name(self):
        # Delegates to the wrapped backend.
        return self.client.name

    @property
    def allow_symlink(self):
        # Delegates to the wrapped backend.
        return self.client.allow_symlink

    @staticmethod
    def parse_uri_prefix(uri: Union[(str, Path)]) -> Optional[str]:
        """Parse the prefix of a uri.

        Args:
            uri (str | Path): Uri to be parsed that contains the file
                prefix.

        Examples:
            >>> FileClient.parse_uri_prefix('s3://path/of/your/file')
            's3'

        Returns:
            str | None: Return the prefix of uri if the uri contains '://'
            else ``None``.
        """
        assert is_filepath(uri)
        uri = str(uri)
        if ('://' not in uri):
            return None
        else:
            (prefix, _) = uri.split('://')
            # Strip a cluster qualifier, e.g. 'clusterName:s3' -> 's3'.
            if (':' in prefix):
                (_, prefix) = prefix.split(':')
            return prefix

    @classmethod
    def infer_client(cls, file_client_args: Optional[dict]=None, uri: Optional[Union[(str, Path)]]=None) -> 'FileClient':
        """Infer a suitable file client based on the URI and arguments.

        Args:
            file_client_args (dict, optional): Arguments to instantiate a
                FileClient. Default: None.
            uri (str | Path, optional): Uri to be parsed that contains the
                file prefix. Default: None.

        Examples:
            >>> uri = 's3://path/of/your/file'
            >>> file_client = FileClient.infer_client(uri=uri)
            >>> file_client_args = {'backend': 'petrel'}
            >>> file_client = FileClient.infer_client(file_client_args)

        Returns:
            FileClient: Instantiated FileClient object.
        """
        assert ((file_client_args is not None) or (uri is not None))
        # Explicit args win; otherwise pick a backend from the uri prefix.
        if (file_client_args is None):
            file_prefix = cls.parse_uri_prefix(uri)
            return cls(prefix=file_prefix)
        else:
            return cls(**file_client_args)

    @classmethod
    def _register_backend(cls, name, backend, force=False, prefixes=None):
        # Validate the registration request before mutating any registry.
        if (not isinstance(name, str)):
            raise TypeError(f'the backend name should be a string, but got {type(name)}')
        if (not inspect.isclass(backend)):
            raise TypeError(f'backend should be a class but got {type(backend)}')
        if (not issubclass(backend, BaseStorageBackend)):
            raise TypeError(f'backend {backend} is not a subclass of BaseStorageBackend')
        if ((not force) and (name in cls._backends)):
            raise KeyError(f'{name} is already registered as a storage backend, add "force=True" if you want to override it')
        # Record forced overrides so stale cached instances are bypassed.
        if ((name in cls._backends) and force):
            cls._overridden_backends.add(name)
        cls._backends[name] = backend
        if (prefixes is not None):
            if isinstance(prefixes, str):
                prefixes = [prefixes]
            else:
                assert isinstance(prefixes, (list, tuple))
            for prefix in prefixes:
                if (prefix not in cls._prefix_to_backends):
                    cls._prefix_to_backends[prefix] = backend
                elif ((prefix in cls._prefix_to_backends) and force):
                    cls._overridden_prefixes.add(prefix)
                    cls._prefix_to_backends[prefix] = backend
                else:
                    raise KeyError(f'{prefix} is already registered as a storage backend, add "force=True" if you want to override it')

    @classmethod
    def register_backend(cls, name, backend=None, force=False, prefixes=None):
        """Register a backend to FileClient.

        This method can be used as a normal class method or a decorator.

        .. code-block:: python

            class NewBackend(BaseStorageBackend):

                def get(self, filepath):
                    return filepath

                def get_text(self, filepath):
                    return filepath

            FileClient.register_backend('new', NewBackend)

        or

        .. code-block:: python

            @FileClient.register_backend('new')
            class NewBackend(BaseStorageBackend):

                def get(self, filepath):
                    return filepath

                def get_text(self, filepath):
                    return filepath

        Args:
            name (str): The name of the registered backend.
            backend (class, optional): The backend class to be registered,
                which must be a subclass of :class:`BaseStorageBackend`.
                When this method is used as a decorator, backend is None.
                Defaults to None.
            force (bool, optional): Whether to override the backend if the
                name has already been registered. Defaults to False.
            prefixes (str or list[str] or tuple[str], optional): The
                prefixes of the registered storage backend. Default: None.
                `New in version 1.3.15.`
        """
        # Normal class-method usage: register immediately.
        if (backend is not None):
            cls._register_backend(name, backend, force=force, prefixes=prefixes)
            return

        # Decorator usage: defer registration until the class is defined.
        def _register(backend_cls):
            cls._register_backend(name, backend_cls, force=force, prefixes=prefixes)
            return backend_cls
        return _register

    def get(self, filepath: Union[(str, Path)]) -> Union[(bytes, memoryview)]:
        """Read data from a given ``filepath`` with 'rb' mode.

        Note:
            There are two types of return values for ``get``, one is
            ``bytes`` and the other is ``memoryview``. The advantage of
            using memoryview is that you can avoid copying, and if you want
            to convert it to ``bytes``, you can use ``.tobytes()``.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes | memoryview: Expected bytes object or a memory view of
            the bytes object.
        """
        return self.client.get(filepath)

    def get_text(self, filepath: Union[(str, Path)], encoding='utf-8') -> str:
        """Read data from a given ``filepath`` with 'r' mode.

        Args:
            filepath (str or Path): Path to read data.
            encoding (str): The encoding format used to open the
                ``filepath``. Default: 'utf-8'.

        Returns:
            str: Expected text reading from ``filepath``.
        """
        return self.client.get_text(filepath, encoding)

    def put(self, obj: bytes, filepath: Union[(str, Path)]) -> None:
        """Write data to a given ``filepath`` with 'wb' mode.

        Note:
            ``put`` should create a directory if the directory of
            ``filepath`` does not exist.

        Args:
            obj (bytes): Data to be written.
            filepath (str or Path): Path to write data.
        """
        self.client.put(obj, filepath)

    def put_text(self, obj: str, filepath: Union[(str, Path)]) -> None:
        """Write data to a given ``filepath`` with 'w' mode.

        Note:
            ``put_text`` should create a directory if the directory of
            ``filepath`` does not exist. The text encoding is determined by
            the underlying backend (e.g. 'utf-8' for the disk backend).

        Args:
            obj (str): Data to be written.
            filepath (str or Path): Path to write data.
        """
        self.client.put_text(obj, filepath)

    def remove(self, filepath: Union[(str, Path)]) -> None:
        """Remove a file.

        Args:
            filepath (str, Path): Path to be removed.
        """
        self.client.remove(filepath)

    def exists(self, filepath: Union[(str, Path)]) -> bool:
        """Check whether a file path exists.

        Args:
            filepath (str or Path): Path to be checked whether exists.

        Returns:
            bool: Return ``True`` if ``filepath`` exists, ``False``
            otherwise.
        """
        return self.client.exists(filepath)

    def isdir(self, filepath: Union[(str, Path)]) -> bool:
        """Check whether a file path is a directory.

        Args:
            filepath (str or Path): Path to be checked whether it is a
                directory.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a directory,
            ``False`` otherwise.
        """
        return self.client.isdir(filepath)

    def isfile(self, filepath: Union[(str, Path)]) -> bool:
        """Check whether a file path is a file.

        Args:
            filepath (str or Path): Path to be checked whether it is a
                file.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a file,
            ``False`` otherwise.
        """
        return self.client.isfile(filepath)

    def join_path(self, filepath: Union[(str, Path)], *filepaths: Union[(str, Path)]) -> str:
        """Concatenate all file paths.

        Join one or more filepath components intelligently. The return
        value is the concatenation of filepath and any members of
        *filepaths.

        Args:
            filepath (str or Path): Path to be concatenated.

        Returns:
            str: The result of concatenation.
        """
        return self.client.join_path(filepath, *filepaths)

    @contextmanager
    def get_local_path(self, filepath: Union[(str, Path)]) -> Iterable[str]:
        """Download data from ``filepath`` and write the data to local path.

        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
        It can be called with a ``with`` statement, and when exiting the
        ``with`` block, the temporary path will be released.

        Note:
            If the ``filepath`` is a local path, just return itself.

        .. warning::
            ``get_local_path`` is an experimental interface that may change
            in the future.

        Args:
            filepath (str or Path): Path to be read data.

        Examples:
            >>> file_client = FileClient(prefix='s3')
            >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path:
            ...     # do something here

        Yields:
            Iterable[str]: Only yield one path.
        """
        with self.client.get_local_path(str(filepath)) as local_path:
            (yield local_path)

    def list_dir_or_file(self, dir_path: Union[(str, Path)], list_dir: bool=True, list_file: bool=True, suffix: Optional[Union[(str, Tuple[str])]]=None, recursive: bool=False) -> Iterator[str]:
        """Scan a directory to find the interested directories or files in
        arbitrary order.

        Note:
            :meth:`list_dir_or_file` returns the path relative to
            ``dir_path``.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix that we are
                interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        (yield from self.client.list_dir_or_file(dir_path, list_dir, list_file, suffix, recursive))
|
class BaseFileHandler(metaclass=ABCMeta):
    """Abstract base class for per-format file handlers (json/yaml/...).

    Subclasses implement :meth:`load_from_fileobj`, :meth:`dump_to_fileobj`
    and :meth:`dump_to_str`; the path-based helpers below are built on top
    of the file-object methods.
    """

    # Whether the serialized form is text (True) or raw bytes (False).
    # ``load``/``dump`` consult this flag to choose StringIO/BytesIO and
    # get_text/put_text vs get/put.
    str_like = True

    @abstractmethod
    def load_from_fileobj(self, file, **kwargs):
        """Deserialize an object from an open file-like object."""
        pass

    @abstractmethod
    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize ``obj`` into an open file-like object."""
        pass

    @abstractmethod
    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` and return the result as a string."""
        pass

    def load_from_path(self, filepath, mode='r', **kwargs):
        # Open the file and delegate to ``load_from_fileobj``; binary
        # handlers (e.g. pickle) override ``mode``.
        with open(filepath, mode) as f:
            return self.load_from_fileobj(f, **kwargs)

    def dump_to_path(self, obj, filepath, mode='w', **kwargs):
        # Open the file and delegate to ``dump_to_fileobj``.
        with open(filepath, mode) as f:
            self.dump_to_fileobj(obj, f, **kwargs)
|
def set_default(obj):
    """Set default json values for non-serializable values.

    It helps convert ``set``, ``range`` and ``np.ndarray`` data types to
    list. It also converts ``np.generic`` (including ``np.int32``,
    ``np.float32``, etc.) into plain numbers of plain python built-in types.
    """
    if isinstance(obj, np.generic):
        return obj.item()
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, (set, range)):
        return list(obj)
    raise TypeError(f'{type(obj)} is unsupported for json dump')
|
class JsonHandler(BaseFileHandler):
    """Handler for JSON (de)serialization.

    Non-JSON-native values (set/range/ndarray/np.generic) are converted by
    :func:`set_default`, installed as the default ``default=`` hook.
    """

    def load_from_fileobj(self, file, **kwargs):
        # Fix: forward **kwargs like BaseFileHandler declares and like the
        # Pickle/Yaml handlers do; previously extra loader kwargs raised
        # TypeError. Backward-compatible: zero-kwarg calls are unchanged.
        return json.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        # Callers may still override ``default`` explicitly.
        kwargs.setdefault('default', set_default)
        json.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` and return the JSON string."""
        kwargs.setdefault('default', set_default)
        return json.dumps(obj, **kwargs)
|
class PickleHandler(BaseFileHandler):
    """Handler for pickle (de)serialization.

    Pickle produces raw bytes, so ``str_like`` is False and the path-based
    helpers open files in binary mode.
    """

    str_like = False

    def load_from_fileobj(self, file, **kwargs):
        return pickle.load(file, **kwargs)

    def load_from_path(self, filepath, **kwargs):
        return super().load_from_path(filepath, mode='rb', **kwargs)

    def dump_to_str(self, obj, **kwargs):
        # Protocol 2 by default, for compatibility with old readers.
        kwargs.setdefault('protocol', 2)
        return pickle.dumps(obj, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        kwargs.setdefault('protocol', 2)
        pickle.dump(obj, file, **kwargs)

    def dump_to_path(self, obj, filepath, **kwargs):
        super().dump_to_path(obj, filepath, mode='wb', **kwargs)
|
class YamlHandler(BaseFileHandler):
    """Handler for YAML (de)serialization.

    The module-level ``Loader``/``Dumper`` are installed as defaults but
    can be overridden through kwargs.
    """

    def load_from_fileobj(self, file, **kwargs):
        """Parse YAML from an open file-like object."""
        kwargs.setdefault('Loader', Loader)
        return yaml.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        """Write ``obj`` as YAML into an open file-like object."""
        kwargs.setdefault('Dumper', Dumper)
        yaml.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        """Return ``obj`` serialized as a YAML string."""
        kwargs.setdefault('Dumper', Dumper)
        return yaml.dump(obj, **kwargs)
|
def load(file, file_format=None, file_client_args=None, **kwargs):
    """Load data from json/yaml/pickle files.

    This method provides a unified api for loading data from serialized
    files.

    Note:
        In v1.3.16 and later, ``load`` supports loading data from
        serialized files those can be storaged in different backends.

    Args:
        file (str or :obj:`Path` or file-like object): Filename or a
            file-like object.
        file_format (str, optional): If not specified, the file format will
            be inferred from the file extension, otherwise use the
            specified one. Currently supported formats include "json",
            "yaml/yml" and "pickle/pkl".
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> load('/path/of/your/file')  # file is storaged in disk
        >>> load('https://path/of/your/file')  # file is storaged in Internet
        >>> load('s3://path/of/your/file')  # file is storaged in petrel

    Returns:
        The content from the file.
    """
    if isinstance(file, Path):
        file = str(file)
    # Infer the format from the extension when not given explicitly.
    if file_format is None and is_str(file):
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')

    handler = file_handlers[file_format]
    if is_str(file):
        file_client = FileClient.infer_client(file_client_args, file)
        # Text handlers read through StringIO/get_text, binary ones
        # through BytesIO/get.
        if handler.str_like:
            buffer_cls, fetch = StringIO, file_client.get_text
        else:
            buffer_cls, fetch = BytesIO, file_client.get
        with buffer_cls(fetch(file)) as f:
            return handler.load_from_fileobj(f, **kwargs)
    if hasattr(file, 'read'):
        return handler.load_from_fileobj(file, **kwargs)
    raise TypeError('"file" must be a filepath str or a file-object')
|
def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.

    This method provides a unified api for dumping data as strings or to
    files, and also supports custom arguments for each file format.

    Note:
        In v1.3.16 and later, ``dump`` supports dumping data as strings or
        to files which is saved to different backends.

    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, then the object is dumped to a str, otherwise to a
            file specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> dump('hello world', '/path/of/your/file')  # disk
        >>> dump('hello world', 's3://path/of/your/file')  # ceph or petrel

    Returns:
        The dumped string when ``file`` is None, otherwise ``None``.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError('file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')

    handler = file_handlers[file_format]
    if file is None:
        # No target: return the serialized string/bytes directly.
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        file_client = FileClient.infer_client(file_client_args, file)
        # Text handlers go through StringIO/put_text, binary ones through
        # BytesIO/put.
        if handler.str_like:
            buffer_cls, put = StringIO, file_client.put_text
        else:
            buffer_cls, put = BytesIO, file_client.put
        with buffer_cls() as f:
            handler.dump_to_fileobj(obj, f, **kwargs)
            put(f.getvalue(), file)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
|
def _register_handler(handler, file_formats):
    """Register a handler for some file extensions.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): File formats to be handled by this
            handler.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(f'handler must be a child of BaseFileHandler, not {type(handler)}')
    if isinstance(file_formats, str):
        file_formats = [file_formats]
    if not is_list_of(file_formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    # One handler instance may serve several extensions (e.g. yaml/yml).
    file_handlers.update({ext: handler for ext in file_formats})
|
def register_handler(file_formats, **kwargs):
    """Class decorator that instantiates the decorated handler (with
    ``**kwargs``) and registers it for ``file_formats``."""
    def _decorator(handler_cls):
        _register_handler(handler_cls(**kwargs), file_formats)
        return handler_cls
    return _decorator
|
def list_from_file(filename, prefix='', offset=0, max_num=0, encoding='utf-8', file_client_args=None):
    """Load a text file and parse the content as a list of strings.

    Note:
        In v1.3.16 and later, ``list_from_file`` supports loading a text
        file which can be storaged in different backends and parsing the
        content as a list for strings.

    Args:
        filename (str): Filename.
        prefix (str): The prefix to be inserted to the beginning of each
            item.
        offset (int): The offset of lines.
        max_num (int): The maximum number of lines to be read, zeros and
            negatives mean no limitation.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> list_from_file('/path/of/your/file')  # disk
        ['hello', 'world']
        >>> list_from_file('s3://path/of/your/file')  # ceph or petrel
        ['hello', 'world']

    Returns:
        list[str]: A list of strings.
    """
    file_client = FileClient.infer_client(file_client_args, filename)
    item_list = []
    with StringIO(file_client.get_text(filename, encoding)) as f:
        # Skip the first ``offset`` lines.
        for _ in range(offset):
            f.readline()
        for line in f:
            if 0 < max_num <= len(item_list):
                break
            item_list.append(prefix + line.rstrip('\r\n'))
    return item_list
|
def dict_from_file(filename, key_type=str, encoding='utf-8', file_client_args=None):
    """Load a text file and parse the content as a dict.

    Each line of the text file will be two or more columns split by
    whitespaces or tabs. The first column will be parsed as dict keys, and
    the following columns will be parsed as dict values.

    Note:
        In v1.3.16 and later, ``dict_from_file`` supports loading a text
        file which can be storaged in different backends and parsing the
        content as a dict.

    Args:
        filename(str): Filename.
        key_type(type): Type of the dict keys. str is user by default and
            type conversion will be performed if specified.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> dict_from_file('/path/of/your/file')  # disk
        {'key1': 'value1', 'key2': 'value2'}
        >>> dict_from_file('s3://path/of/your/file')  # ceph or petrel
        {'key1': 'value1', 'key2': 'value2'}

    Returns:
        dict: The parsed contents.
    """
    file_client = FileClient.infer_client(file_client_args, filename)
    mapping = {}
    with StringIO(file_client.get_text(filename, encoding)) as f:
        for line in f:
            items = line.rstrip('\n').split()
            assert len(items) >= 2
            # A single value column stays scalar; several become a list.
            value = items[1] if len(items) == 2 else items[1:]
            mapping[key_type(items[0])] = value
    return mapping
|
def imconvert(img, src, dst):
    """Convert an image from the src colorspace to dst colorspace.

    Args:
        img (ndarray): The input image.
        src (str): The source colorspace, e.g., 'rgb', 'hsv'.
        dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.

    Returns:
        ndarray: The converted image.
    """
    # Resolve e.g. ('rgb', 'hsv') to cv2.COLOR_RGB2HSV by name.
    conversion_code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
    return cv2.cvtColor(img, conversion_code)
|
def bgr2gray(img, keepdim=False):
    """Convert a BGR image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), then return the grayscale
            image with 2 dims, otherwise 3 dims.

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Optionally restore a trailing singleton channel axis.
    return gray[..., None] if keepdim else gray
|
def rgb2gray(img, keepdim=False):
    """Convert a RGB image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), then return the grayscale
            image with 2 dims, otherwise 3 dims.

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Optionally restore a trailing singleton channel axis.
    return gray[..., None] if keepdim else gray
|
def gray2bgr(img):
    """Convert a grayscale image to BGR image.

    Args:
        img (ndarray): The input image.

    Returns:
        ndarray: The converted BGR image.
    """
    # cv2 expects an explicit channel axis for 2-D grayscale input.
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
def gray2rgb(img):
    """Convert a grayscale image to RGB image.

    Args:
        img (ndarray): The input image.

    Returns:
        ndarray: The converted RGB image.
    """
    # cv2 expects an explicit channel axis for 2-D grayscale input.
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
|
def _convert_input_type_range(img):
'Convert the type and range of the input image.\n\n It converts the input image to np.float32 type and range of [0, 1].\n It is mainly used for pre-processing the input image in colorspace\n conversion functions such as rgb2ycbcr and ycbcr2rgb.\n\n Args:\n img (ndarray): The input image. It accepts:\n 1. np.uint8 type with range [0, 255];\n 2. np.float32 type with range [0, 1].\n\n Returns:\n (ndarray): The converted image with type of np.float32 and range of\n [0, 1].\n '
img_type = img.dtype
img = img.astype(np.float32)
if (img_type == np.float32):
pass
elif (img_type == np.uint8):
img /= 255.0
else:
raise TypeError(f'The img type should be np.float32 or np.uint8, but got {img_type}')
return img
|
def _convert_output_type_range(img, dst_type):
'Convert the type and range of the image according to dst_type.\n\n It converts the image to desired type and range. If `dst_type` is np.uint8,\n images will be converted to np.uint8 type with range [0, 255]. If\n `dst_type` is np.float32, it converts the image to np.float32 type with\n range [0, 1].\n It is mainly used for post-processing images in colorspace conversion\n functions such as rgb2ycbcr and ycbcr2rgb.\n\n Args:\n img (ndarray): The image to be converted with np.float32 type and\n range [0, 255].\n dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it\n converts the image to np.uint8 type with range [0, 255]. If\n dst_type is np.float32, it converts the image to np.float32 type\n with range [0, 1].\n\n Returns:\n (ndarray): The converted image with desired type and range.\n '
if (dst_type not in (np.uint8, np.float32)):
raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}')
if (dst_type == np.uint8):
img = img.round()
else:
img /= 255.0
return img.astype(dst_type)
|
def rgb2ycbcr(img, y_only=False):
    """Convert an RGB image to YCbCr (ITU-R BT.601, Matlab-compatible).

    Produces the same results as Matlab's ``rgb2ycbcr``. Note this differs
    from ``cv2.cvtColor`` with ``RGB <-> YCrCb``, which implements the JPEG
    conversion; see
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): Input image; np.uint8 in [0, 255] or np.float32
            in [0, 1].
        y_only (bool): Whether to return only the Y channel.
            Default: False.

    Returns:
        ndarray: Converted YCbCr image (or Y channel) with the same dtype
        and value range as the input.
    """
    src_dtype = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        converted = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
    else:
        # Columns are the Y, Cb, Cr coefficients for the R, G, B rows.
        weights = [[65.481, -37.797, 112.0],
                   [128.553, -74.203, -93.786],
                   [24.966, 112.0, -18.214]]
        converted = np.matmul(img, weights) + [16, 128, 128]
    return _convert_output_type_range(converted, src_dtype)
|
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr (ITU-R BT.601).

    BGR-ordered counterpart of ``rgb2ycbcr``. Note this differs from
    ``cv2.cvtColor`` with ``BGR <-> YCrCb``, which implements the JPEG
    conversion; see
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): Input image; np.uint8 in [0, 255] or np.float32
            in [0, 1].
        y_only (bool): Whether to return only the Y channel.
            Default: False.

    Returns:
        ndarray: Converted YCbCr image (or Y channel) with the same dtype
        and value range as the input.
    """
    src_dtype = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        converted = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
    else:
        # Same BT.601 matrix as rgb2ycbcr with the B and R rows swapped.
        weights = [[24.966, 112.0, -18.214],
                   [128.553, -74.203, -93.786],
                   [65.481, -37.797, 112.0]]
        converted = np.matmul(img, weights) + [16, 128, 128]
    return _convert_output_type_range(converted, src_dtype)
|
def ycbcr2rgb(img):
    """Convert a YCbCr image to RGB (ITU-R BT.601, Matlab-compatible).

    Produces the same results as Matlab's ``ycbcr2rgb``. Note this differs
    from ``cv2.cvtColor`` with ``YCrCb <-> RGB``, which implements the JPEG
    conversion; see
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): Input image; np.uint8 in [0, 255] or np.float32
            in [0, 1].

    Returns:
        ndarray: Converted RGB image with the same dtype and value range
        as the input.
    """
    src_dtype = img.dtype
    # Work on the [0, 255] scale regardless of the input dtype.
    img = _convert_input_type_range(img) * 255
    inverse_weights = [[0.00456621, 0.00456621, 0.00456621],
                       [0, -0.00153632, 0.00791071],
                       [0.00625893, -0.00318811, 0]]
    converted = (np.matmul(img, inverse_weights) * 255.0
                 + [-222.921, 135.576, -276.836])
    return _convert_output_type_range(converted, src_dtype)
|
def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR (ITU-R BT.601).

    BGR-ordered counterpart of ``ycbcr2rgb``. Note this differs from
    ``cv2.cvtColor`` with ``YCrCb <-> BGR``, which implements the JPEG
    conversion; see
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): Input image; np.uint8 in [0, 255] or np.float32
            in [0, 1].

    Returns:
        ndarray: Converted BGR image with the same dtype and value range
        as the input.
    """
    src_dtype = img.dtype
    # Work on the [0, 255] scale regardless of the input dtype.
    img = _convert_input_type_range(img) * 255
    # Same inverse BT.601 matrix as ycbcr2rgb with output columns swapped.
    inverse_weights = [[0.00456621, 0.00456621, 0.00456621],
                       [0.00791071, -0.00153632, 0],
                       [0, -0.00318811, 0.00625893]]
    converted = (np.matmul(img, inverse_weights) * 255.0
                 + [-276.836, 135.576, -222.921])
    return _convert_output_type_range(converted, src_dtype)
|
def convert_color_factory(src, dst):
    """Create a colorspace conversion function backed by OpenCV.

    Args:
        src (str): Source colorspace name, e.g. 'bgr', 'rgb', 'hsv'.
        dst (str): Destination colorspace name.

    Returns:
        callable: A ``convert_color(img)`` function using the matching
        ``cv2.COLOR_*`` conversion code.
    """
    conversion_code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')

    def convert_color(img):
        return cv2.cvtColor(img, conversion_code)

    convert_color.__doc__ = f'''Convert a {src.upper()} image to {dst.upper()}
image.
Args:
img (ndarray or str): The input image.
Returns:
ndarray: The converted {dst.upper()} image.
'''
    return convert_color
|
def tensor2imgs(tensor, mean=None, std=None, to_rgb=True):
    """Convert a batched image tensor to a list of numpy images.

    Args:
        tensor (torch.Tensor): Tensor containing multiple images with shape
            (N, C, H, W); ``C`` must be 1 or 3.
        mean (tuple[float], optional): Mean used when the tensor was
            normalized. Defaults to zeros matching the channel count.
        std (tuple[float], optional): Std used when the tensor was
            normalized. Defaults to ones matching the channel count.
        to_rgb (bool, optional): Whether the tensor was converted to RGB;
            if so it is converted back to BGR. Must be False for 1-channel
            tensors. Defaults to True.

    Returns:
        list[np.ndarray]: One contiguous uint8 image per batch entry.
    """
    if torch is None:
        raise RuntimeError('pytorch is not installed')
    assert torch.is_tensor(tensor) and tensor.ndim == 4
    channels = tensor.size(1)
    assert channels in [1, 3]
    if mean is None:
        mean = (0,) * channels
    if std is None:
        std = (1,) * channels
    # mean/std must match the channel count; grayscale forbids to_rgb.
    assert (channels == len(mean) == len(std) == 3) or \
        (channels == len(mean) == len(std) == 1 and not to_rgb)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for single in tensor:
        # CHW -> HWC, then undo normalization and restore BGR order.
        img = single.cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs
|
def is_custom_op_loaded():
    """Check whether any mmcv custom-op backend is available.

    Emits a deprecation warning pointing users at MMDeploy, then returns
    True when the TensorRT plugin is loaded, the ONNX Runtime op library
    exists on disk, or the installed torch build is parrots.
    """
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    msg = (white_background + bright_style + red_text
           + 'DeprecationWarning: This function will be deprecated in future. '
           + blue_text
           + 'Welcome to use the unified model deployment toolbox '
           + 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           + reset_style)
    warnings.warn(msg)
    loaded = False
    try:
        from ..tensorrt import is_tensorrt_plugin_loaded
        loaded = is_tensorrt_plugin_loaded()
    except (ImportError, ModuleNotFoundError):
        # Optional backend; absence is not an error.
        pass
    if not loaded:
        try:
            from ..ops import get_onnxruntime_op_path
            loaded = os.path.exists(get_onnxruntime_op_path())
        except (ImportError, ModuleNotFoundError):
            pass
    return loaded or (torch.__version__ == 'parrots')
|
def _parse_arg(value, desc):
if (desc == 'none'):
return value
if ((desc == 'v') or (not _is_value(value))):
return value
if value.node().mustBeNone():
return None
if (value.node().kind() == 'onnx::Constant'):
tval = value.node()['value']
if (desc == 'i'):
return int(tval)
elif (desc == 'f'):
return float(tval)
elif (desc == 'b'):
return bool(tval)
elif (desc == 's'):
return str(tval)
elif (desc == 't'):
return tval
elif (desc == 'is'):
return [int(v) for v in tval]
elif (desc == 'fs'):
return [float(v) for v in tval]
else:
raise RuntimeError("ONNX symbolic doesn't know to interpret Constant node")
elif (value.node().kind() == 'prim::ListConstruct'):
if (desc == 'is'):
for v in value.node().inputs():
if (v.node().kind() != 'onnx::Constant'):
raise RuntimeError((("Failed to export an ONNX attribute '" + v.node().kind()) + "', since it's not constant, please try to make things (e.g., kernel size) static if possible"))
return [int(v.node()['value']) for v in value.node().inputs()]
else:
raise RuntimeError("ONNX symbolic doesn't know to interpret ListConstruct node")
raise RuntimeError('Unexpected node type: {}'.format(value.node().kind()))
|
def _maybe_get_const(value, desc):
    """Parse ``value`` as ``desc`` when it is an ``onnx::Constant`` output.

    Non-value inputs and non-constant graph values pass through unchanged.
    """
    if not _is_value(value):
        return value
    if value.node().kind() != 'onnx::Constant':
        return value
    return _parse_arg(value, desc)
|
def _maybe_get_scalar(value):
    """Return ``value`` as a 0-dim tensor constant if possible.

    Falls back to returning ``value`` unchanged when it is not a scalar
    tensor constant.
    """
    maybe_tensor = _maybe_get_const(value, 't')
    is_scalar_tensor = isinstance(maybe_tensor, torch.Tensor) \
        and maybe_tensor.shape == ()
    return maybe_tensor if is_scalar_tensor else value
|
def _get_const(value, desc, arg_name):
    """Parse ``value`` as ``desc``, requiring it to be a constant.

    Raises:
        RuntimeError: If ``value`` is a graph value not produced by an
            ``onnx::Constant`` or ``prim::Constant`` node.
    """
    non_constant = _is_value(value) and \
        value.node().kind() not in ('onnx::Constant', 'prim::Constant')
    if non_constant:
        raise RuntimeError(
            'ONNX symbolic expected a constant value of the {} argument, '
            'got `{}`'.format(arg_name, value))
    return _parse_arg(value, desc)
|
def _unpack_list(list_value):
list_node = list_value.node()
assert (list_node.kind() == 'prim::ListConstruct')
return list(list_node.inputs())
|
def _is_packed_list(list_value):
    """Return True if ``list_value`` is built by ``prim::ListConstruct``."""
    if not _is_value(list_value):
        return False
    return list_value.node().kind() == 'prim::ListConstruct'
|
def parse_args(*arg_descriptors):
    """Decorator that parses symbolic-function arguments before the call.

    Each positional argument after the graph context ``g`` is converted
    with ``_parse_arg`` according to its matching descriptor ('v', 'i',
    'is', 'none', ...). Extra descriptors beyond the supplied arguments
    are ignored.
    """
    def decorator(fn):
        # Keep the descriptors discoverable on the wrapped function.
        fn._arg_descriptors = arg_descriptors

        def wrapper(g, *args):
            assert len(arg_descriptors) >= len(args)
            parsed = [
                _parse_arg(arg, desc)
                for arg, desc in zip(args, arg_descriptors)
            ]
            return fn(g, *parsed)

        try:
            wrapper = wraps(fn)(wrapper)
        except Exception:
            # Some callables cannot be wrapped (e.g. missing __name__).
            pass
        return wrapper

    return decorator
|
def _scalar(x):
    """Convert a one-element tensor into a plain Python value.

    Args:
        x (torch.Tensor): Tensor holding exactly one element.

    Returns:
        The contained value, via ``Tensor.item()``.
    """
    assert (x.numel() == 1)
    return x.item()
|
def _if_scalar_type_as(g, self, tensor):
'Convert self into the same type of tensor, as necessary.'
if isinstance(self, torch._C.Value):
return self
scalar_type = tensor.type().scalarType()
if scalar_type:
ty = scalar_type.lower()
return getattr(self, ty)()
return self
|
def _is_none(x):
    """Return True if the JIT value ``x`` is statically known to be None."""
    return x.node().mustBeNone()
|
def _is_value(x):
    """Return True if ``x`` is a TorchScript graph value (torch._C.Value)."""
    return isinstance(x, torch._C.Value)
|
def _is_tensor_list(x):
    """Return True if the JIT value ``x`` has type ``List[Tensor]``."""
    return x.type().isSubtypeOf(ListType.ofTensors())
|
def _unimplemented(op, msg):
warnings.warn((((('ONNX export failed on ' + op) + ' because ') + msg) + ' not supported'))
|
def _try_get_scalar_type(*args):
for arg in args:
try:
return arg.type().scalarType()
except RuntimeError:
pass
return None
|
def _topk_helper(g, input, k, dim, largest=True, sorted=False, out=None):
    """Emit an ONNX ``TopK`` node producing (values, indices).

    ``k`` may be a plain int (wrapped into a 1-element int64 constant) or
    a graph value (reshaped to a 1-element tensor). The ``out`` parameter
    is not supported and only triggers a warning.
    """
    if out is not None:
        _unimplemented('TopK', 'Out parameter is not supported')
    if _is_value(k):
        k = g.op('Reshape', k, g.op('Constant', value_t=torch.tensor([1])))
    else:
        k = g.op('Constant', value_t=torch.tensor([k], dtype=torch.int64))
    return g.op('TopK', input, k, axis_i=dim, largest_i=largest,
                sorted_i=sorted, outputs=2)
|
def _slice_helper(g, input, axes, starts, ends, steps=None, dynamic_slice=False):
    """Emit an opset-10 ONNX ``Slice`` on ``input``.

    Thin wrapper around ``torch.onnx.symbolic_opset10._slice``; see that
    function for the semantics of axes/starts/ends/steps and the
    ``dynamic_slice`` flag.
    """
    # Local import so torch.onnx is only touched when a slice is emitted.
    from torch.onnx.symbolic_opset10 import _slice
    return _slice(g, input, axes, starts, ends, steps, dynamic_slice)
|
def _unsqueeze_helper(g, input, dim):
    """Emit an opset-9 ONNX ``Unsqueeze`` inserting a size-1 axis at ``dim``."""
    from torch.onnx.symbolic_opset9 import unsqueeze
    return unsqueeze(g, input, dim)
|
def _interpolate_size_to_scales(g, input, output_size, dim):
    """Build the ``scales`` input of ONNX ``Resize`` from a target size.

    Args:
        g: ONNX graph context.
        input: Input tensor value with layout (N, C, spatial...).
        output_size: Target spatial size; either a static int list or a
            dynamic graph value.
        dim (int): Total input rank (spatial dims are ``dim - 2``).

    Returns:
        Graph value of float per-dimension scale factors, with the N and C
        entries fixed to 1.
    """
    output_size = _maybe_get_const(output_size, 'is')
    if _is_value(output_size):
        # Dynamic target size: scales = output_size / input_spatial_shape,
        # prefixed with ones for the batch and channel dimensions.
        offset = 2
        offsets = g.op('Constant', value_t=torch.ones(offset, dtype=torch.float32))
        dividend = g.op('Cast', output_size, to_i=cast_pytorch_to_onnx['Float'])
        divisor = _slice_helper(g, g.op('Shape', input), axes=[0], ends=[maxsize], starts=[offset])
        divisor = g.op('Cast', divisor, to_i=cast_pytorch_to_onnx['Float'])
        scale_dims = g.op('Div', dividend, divisor)
        scales = g.op('Concat', offsets, scale_dims, axis_i=0)
    else:
        # Static target size: compute the float ratios up front; the first
        # two (N, C) entries stay 1.0.
        scales_constant = [(1.0 if (i < 2) else (float(output_size[(- (dim - i))]) / float(input.type().sizes()[(- (dim - i))]))) for i in range(0, dim)]
        scales = g.op('Constant', value_t=torch.tensor(scales_constant, dtype=torch.float32))
    return scales
|
def _interpolate_get_scales_if_available(g, scales):
    """Extract explicit interpolate scale factors, if the caller gave any.

    Args:
        g: ONNX graph context.
        scales (Sequence): Symbolic scale arguments of the interpolate call
            (possibly empty, or None/-1 placeholders).

    Returns:
        Graph value of per-dimension scales prefixed with [1, 1] for the N
        and C dimensions, or None when no usable scales were provided.
    """
    if (len(scales) == 0):
        return None
    # 'fs' (float list) when the first entry is a list or a multi-element
    # tensor; otherwise a single float 'f'.
    scale_desc = ('fs' if ((scales[0].type().kind() == 'ListType') or ((scales[0].type().kind() == 'TensorType') and (sum(scales[0].type().sizes()) > 1))) else 'f')
    # A placeholder of -1 or a statically-None value means "no scales".
    available_scales = ((_maybe_get_const(scales[0], scale_desc) != (- 1)) and (not _is_none(scales[0])))
    if (not available_scales):
        return None
    offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if (scale_desc == 'fs'):
        scales_list = g.op('Constant', value_t=torch.tensor(_maybe_get_const(scales[0], scale_desc)))
        scales = g.op('Concat', offsets, scales_list, axis_i=0)
    else:
        # One scalar per spatial dim: unsqueeze each into a 1-element float
        # tensor and concatenate after the [1, 1] prefix.
        scales_list = []
        for scale in scales:
            unsqueezed_scale = _unsqueeze_helper(g, scale, 0)
            unsqueezed_scale = g.op('Cast', unsqueezed_scale, to_i=cast_pytorch_to_onnx['Float'])
            scales_list.append(unsqueezed_scale)
        scales = g.op('Concat', offsets, *scales_list, axis_i=0)
    return scales
|
def _get_interpolate_attributes(g, mode, args):
    """Split interpolate symbolic args into (scales, align_corners).

    Nearest-mode interpolate takes no ``align_corners`` argument, so all
    args are scale candidates; other modes carry ``align_corners`` first.
    """
    if mode == 'nearest':
        align_corners, scale_args = None, args[0:]
    else:
        align_corners, scale_args = args[0], args[1:]
    scales = _interpolate_get_scales_if_available(g, scale_args)
    return scales, align_corners
|
def _interpolate_get_scales(g, scale_factor, dim):
    """Build a full ``Resize`` scales tensor from an interpolate scale_factor.

    Prepends [1, 1] for the N and C dimensions. A list-typed scale_factor
    is concatenated directly; a scalar is repeated for each of the
    ``dim - 2`` spatial dimensions.
    """
    offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if isinstance(scale_factor.type(), torch._C.ListType):
        return g.op('Concat', offsets, scale_factor, axis_i=0)
    else:
        scale_factor = _unsqueeze_helper(g, scale_factor, 0)
        scale_factor = g.op('Cast', scale_factor, to_i=cast_pytorch_to_onnx['Float'])
        scales = [scale_factor for i in range((dim - 2))]
        scale_factor = g.op('Concat', offsets, *scales, axis_i=0)
    return scale_factor
|
def _size_helper(g, self, dim):
    """Emit ops computing ``self.size(dim)`` where ``dim`` is a graph value."""
    full_shape = g.op('Shape', self)
    from torch.onnx.symbolic_opset9 import select
    # Select the requested entry from the 1-D shape tensor (axis 0).
    return select(g, full_shape, g.op('Constant', value_t=torch.tensor([0])), dim)
|
def _avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name):
if (divisor_override and (divisor_override.node().kind() != 'prim::Constant')):
return _unimplemented(name, 'divisor_override')
if (not stride):
stride = kernel_size
padding = tuple(tuple_fn(padding))
return padding
|
def _interpolate(name, dim, interpolate_mode):
    """Build a symbolic function emitting ONNX ``Resize`` for interpolate.

    Args:
        name (str): Upsample op name (kept for parity with torch helpers).
        dim (int): Expected input rank for this variant.
        interpolate_mode (str): 'nearest', 'linear' or 'cubic'.

    Returns:
        callable: Symbolic function ``(g, input, output_size, *args)``.
    """
    def symbolic_fn(g, input, output_size, *args):
        (scales, align_corners) = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        # Map PyTorch's sampling convention onto the matching ONNX
        # coordinate_transformation_mode.
        transformation_mode = ('asymmetric' if (interpolate_mode == 'nearest') else ('align_corners' if align_corners else 'pytorch_half_pixel'))
        empty_tensor = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
        if (scales is None):
            # No explicit scale factors: drive Resize by target size instead.
            if (('ONNX_BACKEND' in os.environ) and (os.environ['ONNX_BACKEND'] == 'TensorRT')):
                # TensorRT needs a fully static size constant, so fold the
                # static N, C sizes together with the constant target size.
                input_size = input.type().sizes()
                input_size = input_size[:2]
                output_size = sym_help._maybe_get_const(output_size, 'is')
                input_size.extend(output_size)
                output_size = g.op('Constant', value_t=torch.tensor(input_size, dtype=torch.int64))
            else:
                # Dynamic path: concat the runtime Shape[:2] with the
                # requested spatial size.
                input_size = g.op('Shape', input)
                input_size_beg = sym_help._slice_helper(g, input_size, axes=[0], ends=[2], starts=[0])
                output_size = g.op('Cast', output_size, to_i=sym_help.cast_pytorch_to_onnx['Long'])
                output_size = g.op('Concat', input_size_beg, output_size, axis_i=0)
            scales = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
            return g.op('Resize', input, empty_tensor, scales, output_size, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
        else:
            return g.op('Resize', input, empty_tensor, scales, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
    return symbolic_fn
|
@parse_args('v', 'v', 'i', 'i', 'i', 'none')
def topk(g, self, k, dim, largest, sorted, out=None):
    """Symbolic for ``torch.topk``: emits ONNX ``TopK`` via ``_topk_helper``."""
    return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)
|
def masked_select(g, self, mask):
    """Symbolic for ``torch.masked_select`` using NonZero + GatherND."""
    from torch.onnx.symbolic_opset9 import expand_as, nonzero
    # Broadcast the mask to self's shape, then gather the selected entries.
    index = nonzero(g, expand_as(g, mask, self))
    return g.op('GatherND', self, index)
|
def _prepare_onnx_paddings(g, dim, pad):
    """Convert PyTorch-style paddings to the layout ONNX ``Pad`` expects.

    PyTorch pads are (begin, end) pairs starting from the last dimension
    and may cover fewer than ``dim`` dims; ONNX wants all begins followed
    by all ends, ordered from the first dimension.
    """
    # Number of pad values actually supplied (pad may be dynamic).
    pad_len = torch.onnx.symbolic_opset9.size(g, pad, g.op('Constant', value_t=torch.tensor([0])))
    # Zero-extension length so that every dimension gets a (begin, end) pair:
    # 2 * dim - pad_len.
    extension = g.op('Sub', g.op('Mul', g.op('Constant', value_t=torch.tensor(dim, dtype=torch.int64)), g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))), pad_len)
    pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    paddings = g.op('Concat', pad, g.op('ConstantOfShape', extension, value_t=torch.tensor([0], dtype=torch.int64)), axis_i=0)
    # Reshape to (dim, 2) pairs, flip the dimension order (PyTorch lists the
    # last dim first), then transpose so all begins precede all ends before
    # flattening back to 1-D.
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1), 2])))
    paddings = g.op('Transpose', torch.onnx.symbolic_opset10.flip(g, paddings, [0]), perm_i=[1, 0])
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1)])))
    padding_c = g.op('Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    return padding_c
|
def constant_pad_nd(g, input, padding, value=None):
    """Symbolic for constant-mode ``F.pad`` via ONNX ``Pad``."""
    mode = 'constant'
    # Cast the fill value to input's scalar type when it is a plain number.
    value = sym_help._maybe_get_scalar(value)
    value = sym_help._if_scalar_type_as(g, value, input)
    pad = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, pad, value, mode_s=mode)
|
def reflection_pad(g, input, padding):
    """Symbolic for reflection padding (1d/2d/3d) via ONNX ``Pad``."""
    mode = 'reflect'
    paddings = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, paddings, mode_s=mode)
|
def _avg_pool(name, tuple_fn):
    """Build an opset-11 symbolic for 1d/2d/3d average pooling.

    Args:
        name (str): Op name passed through to the helper (e.g. 'avg_pool2d').
        tuple_fn (callable): Expands ints/lists to the op's spatial tuple.

    Returns:
        callable: Symbolic function emitting ONNX ``AveragePool``.
    """
    @parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none')
    def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):
        padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)
        if (not stride):
            stride = kernel_size
        if count_include_pad:
            # ONNX AveragePool excludes explicit pads from the average, so
            # pre-pad the input with zeros and drop the op-level padding.
            input = g.op('Pad', input, g.op('Constant', value_t=torch.tensor(((((0,) * 2) + padding) * 2))), mode_s='constant')
            padding = ((0,) * len(padding))
        output = g.op('AveragePool', input, kernel_shape_i=tuple_fn(kernel_size), strides_i=tuple_fn(stride), pads_i=(padding * 2), ceil_mode_i=ceil_mode)
        return output
    return symbolic_fn
|
def _get_im2col_indices_along_dim(g, input_d, kernel_size_d, dilation_d, padding_d, stride_d):
    """Compute the gather indices of sliding windows along one dimension.

    Returns a (kernel_size_d, num_blocks)-shaped index value: each row is
    the stride-spaced block start positions offset by one (dilated) kernel
    tap.
    """
    # Upper bound for block starts: input_d + 2*pad - dilation*(k-1).
    blocks_d = g.op('Add', input_d, g.op('Constant', value_t=torch.tensor((padding_d * 2))))
    blocks_d = g.op('Sub', blocks_d, g.op('Constant', value_t=torch.tensor((dilation_d * (kernel_size_d - 1)))))
    # Stride-spaced start positions [0, stride, 2*stride, ...).
    blocks_d_indices = g.op('Range', g.op('Constant', value_t=torch.tensor(0)), blocks_d, g.op('Constant', value_t=torch.tensor(stride_d)))
    # Per-tap offsets 0, dilation, 2*dilation, ...
    kernel_grid = np.arange(0, (kernel_size_d * dilation_d), dilation_d)
    kernel_grid = g.op('Constant', value_t=torch.tensor([kernel_grid]))
    blocks_d_indices = g.op('Unsqueeze', blocks_d_indices, axes_i=[0])
    kernel_mask = g.op('Reshape', kernel_grid, g.op('Constant', value_t=torch.tensor([(- 1), 1])))
    # Broadcast add: (k, 1) + (1, num_blocks) -> (k, num_blocks).
    block_mask = g.op('Add', blocks_d_indices, kernel_mask)
    return block_mask
|
def _get_im2col_padded_input(g, input, padding_h, padding_w):
    """Zero-pad the H and W dimensions of an NCHW input for im2col."""
    # ONNX Pad layout: begins for all dims, then ends for all dims.
    pad = g.op('Constant', value_t=torch.LongTensor(([0, 0, padding_h, padding_w] * 2)))
    return g.op('Pad', input, pad)
|
def _get_im2col_output_shape(g, input, kernel_h, kernel_w):
    """Build the (N, C * kernel_h * kernel_w, -1) shape for the im2col reshape."""
    batch_dim = size(g, input, g.op('Constant', value_t=torch.tensor(0)))
    channel_dim = size(g, input, g.op('Constant', value_t=torch.tensor(1)))
    channel_unfolded = g.op('Mul', channel_dim, g.op('Constant', value_t=torch.tensor((kernel_h * kernel_w))))
    # Concatenate [N, C*kh*kw, -1]; -1 lets Reshape infer the block count.
    return g.op('Concat', g.op('Unsqueeze', batch_dim, axes_i=[0]), g.op('Unsqueeze', channel_unfolded, axes_i=[0]), g.op('Constant', value_t=torch.tensor([(- 1)])), axis_i=0)
|
def size(g, self, dim=None):
    """Symbolic for ``Tensor.size``: full shape, or a single extent if ``dim`` is given."""
    if (dim is None):
        return g.op('Shape', self)
    return sym_help._size_helper(g, self, dim)
|
@parse_args('v', 'is', 'is', 'is', 'is')
def im2col(g, input, kernel_size, dilation, padding, stride):
    """Symbolic for ``im2col`` (unfold) on an NCHW input.

    Gathers sliding-window patches along H, then W, and reshapes the
    result to (N, C * kernel_h * kernel_w, L).
    """
    input_h = size(g, input, g.op('Constant', value_t=torch.tensor(2)))
    input_w = size(g, input, g.op('Constant', value_t=torch.tensor(3)))
    (stride_h, stride_w) = (stride[0], stride[1])
    (padding_h, padding_w) = (padding[0], padding[1])
    (dilation_h, dilation_w) = (dilation[0], dilation[1])
    (kernel_h, kernel_w) = (kernel_size[0], kernel_size[1])
    blocks_row_indices = _get_im2col_indices_along_dim(g, input_h, kernel_h, dilation_h, padding_h, stride_h)
    blocks_col_indices = _get_im2col_indices_along_dim(g, input_w, kernel_w, dilation_w, padding_w, stride_w)
    output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
    padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)
    # Gather window rows (axis 2 = H) then window columns (axis 4 = W).
    output = g.op('Gather', padded_input, blocks_row_indices, axis_i=2)
    output = g.op('Gather', output, blocks_col_indices, axis_i=4)
    # Put the kernel axes next to the channel axis before flattening.
    output = g.op('Transpose', output, perm_i=[0, 1, 2, 4, 3, 5])
    return g.op('Reshape', output, output_shape)
|
@parse_args('v', 'i')
def one_hot(g, self, num_classes):
    """Symbolic for ``F.one_hot``: ONNX ``OneHot`` along the last axis."""
    # off/on values for OneHot's values input.
    values = g.op('Constant', value_t=torch.LongTensor([0, 1]))
    depth = g.op('Constant', value_t=torch.LongTensor([num_classes]))
    return g.op('OneHot', self, depth, values, axis_i=(- 1))
|
@parse_args('v', 'i', 'none')
def softmax(g, input, dim, dtype=None):
    """Symbolic for ``F.softmax`` with an optional output dtype cast.

    Uses the native ONNX ``Softmax`` when ``dim`` is the last axis of a
    statically-ranked input; otherwise emits an explicit, numerically
    stabilized exp/sum decomposition along ``dim``.
    """
    input_dim = input.type().dim()
    if input_dim:
        # Normalize negative axes against the known rank.
        if (dim < 0):
            dim = (input_dim + dim)
        if (input_dim == (dim + 1)):
            # Last-axis case maps directly onto ONNX Softmax.
            softmax = g.op('Softmax', input, axis_i=dim)
            if (dtype and (dtype.node().kind() != 'prim::Constant')):
                parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
                softmax = g.op('Cast', softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
            return softmax
    # Fallback: subtract the per-axis max for numerical stability, then
    # normalize exp(input) by its sum along ``dim``.
    max_value = g.op('ReduceMax', input, axes_i=[dim], keepdims_i=1)
    input = g.op('Sub', input, max_value)
    exp = g.op('Exp', input)
    sum = g.op('ReduceSum', exp, axes_i=[dim])
    softmax = g.op('Div', exp, sum)
    if (dtype and (dtype.node().kind() != 'prim::Constant')):
        parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
        softmax = g.op('Cast', softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    return softmax
|
def _adaptive_pool(name, type, tuple_fn, fn=None):
    """Build a symbolic for adaptive average/max pooling.

    Derives fixed kernel/stride values from the static input shape, since
    ONNX has no adaptive pooling op; an output size of all ones maps to the
    Global* pooling ops.

    Args:
        name (str): Op name (kept for parity with the torch helpers).
        type (str): ONNX op type, 'AveragePool' or 'MaxPool'.
        tuple_fn (callable): Expands int lists to the spatial tuple.
        fn (callable, optional): Max-pool symbolic used to emit the op
            together with its indices output.
    """
    @parse_args('v', 'is')
    def symbolic_fn(g, input, output_size):
        if ((output_size == ([1] * len(output_size))) and (type == 'AveragePool')):
            return g.op('GlobalAveragePool', input)
        if (not input.isCompleteTensor()):
            # Without static shapes only global pooling is expressible.
            # NOTE(review): max pool returns a (values, None) pair because
            # the indices output is unavailable here.
            if (output_size == ([1] * len(output_size))):
                return (g.op('GlobalMaxPool', input), None)
            raise NotImplementedError('[Adaptive pool]:input size not accessible')
        dim = input.type().sizes()[2:]
        if ((output_size == ([1] * len(output_size))) and (type == 'MaxPool')):
            return (g.op('GlobalMaxPool', input), None)
        # Choose stride s = floor(in/out) and a kernel covering the rest so
        # each output cell pools an equal-sized window.
        s = [int((dim[i] / output_size[i])) for i in range(0, len(dim))]
        k = [(dim[i] - ((output_size[i] - 1) * s[i])) for i in range(0, len(dim))]
        if (type == 'MaxPool'):
            return fn(g, input, k, k, ((0,) * len(dim)), ((1,) * len(dim)), False)
        output = g.op(type, input, kernel_shape_i=tuple_fn(k), strides_i=tuple_fn(s), ceil_mode_i=False)
        return output
    return symbolic_fn
|
def new_full(g, self, size, fill_value, dtype, layout, device, pin_memory=False):
    """Symbolic for ``Tensor.new_full``; inherits ``self``'s dtype when none is given."""
    from torch.onnx.symbolic_opset9 import full
    if ((dtype is None) and self.isCompleteTensor()):
        # Translate the scalar type name into the ONNX dtype index expected
        # by opset9 full().
        dtype = self.type().scalarType()
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    return full(g, size, fill_value, dtype, layout, device, pin_memory)
|
@parse_args('v', 'v', 'i', 'i', 'i')
def grid_sampler(g, input, grid, interpolation_mode, padding_mode, align_corners=False):
    """Symbolic for grid_sample, emitting the custom ``mmcv::grid_sampler`` op."""
    return g.op('mmcv::grid_sampler', input, grid, interpolation_mode_i=interpolation_mode, padding_mode_i=padding_mode, align_corners_i=align_corners)
|
@parse_args('v', 'i')
def cummax(g, input, dim):
    """Symbolic for ``torch.cummax``: custom op with (values, indices) outputs."""
    return g.op('mmcv::cummax', input, dim_i=dim, outputs=2)
|
@parse_args('v', 'i')
def cummin(g, input, dim):
    """Symbolic for ``torch.cummin``: custom op with (values, indices) outputs."""
    return g.op('mmcv::cummin', input, dim_i=dim, outputs=2)
|
@parse_args('v', 'v', 'is')
def roll(g, input, shifts, dims):
    """Symbolic for ``torch.roll`` built from per-dimension Slice + Concat.

    When ``dims`` is empty, PyTorch rolls the flattened tensor; this is
    emulated by reshaping to (1, -1), rolling along dim 1 and restoring
    the original shape afterwards.
    """
    from packaging import version
    from torch.onnx.symbolic_opset9 import squeeze
    input_shape = g.op('Shape', input)
    need_flatten = (len(dims) == 0)
    if need_flatten:
        resize_shape = input_shape
        input = g.op('Reshape', input, g.op('Constant', value_t=torch.LongTensor([1, (- 1)])))
        input_shape = g.op('Shape', input)
        dims = [1]
    for (index, dim) in enumerate(dims):
        end_size = sym_help._slice_helper(g, input_shape, axes=[0], ends=[(dim + 1)], starts=[dim])
        shift_size = sym_help._slice_helper(g, shifts, axes=[0], ends=[(index + 1)], starts=[index])
        slice_size = g.op('Sub', end_size, shift_size)
        # Reduce the split point modulo the dimension size so negative and
        # over-length shifts wrap correctly:
        # slice_size -= end_size * (slice_size // end_size).
        div_size = g.op('Div', slice_size, end_size)
        slice_size = g.op('Sub', slice_size, g.op('Mul', end_size, div_size))
        if (version.parse(torch.__version__) >= version.parse('1.7.0')):
            # torch >= 1.7 requires the squeeze axis to be explicit.
            end_size = squeeze(g, end_size, 0)
            slice_size = squeeze(g, slice_size, 0)
        else:
            end_size = g.op('Squeeze', end_size)
            slice_size = g.op('Squeeze', slice_size)
        dim = torch.LongTensor([dim])
        # Split the axis at the computed boundary and swap the two halves.
        input_slice0 = sym_help._slice_helper(g, input, axes=dim, starts=torch.LongTensor([0]), ends=slice_size, dynamic_slice=True)
        input_slice1 = sym_help._slice_helper(g, input, axes=dim, ends=end_size, starts=slice_size, dynamic_slice=True)
        input = g.op('Concat', input_slice1, input_slice0, axis_i=dim)
    if need_flatten:
        input = g.op('Reshape', input, resize_shape)
    return input
|
def register_extra_symbolics(opset=11):
    """Register mmcv's extra ONNX symbolic functions for the given opset.

    Emits a deprecation warning pointing users at MMDeploy, then registers
    every custom symbolic defined in this module (plus the per-dimension
    variants defined elsewhere in the file) under the default domain.

    Args:
        opset (int): ONNX opset version to register against. Default: 11.
    """
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    # The empty string registers into the default (aten) domain.
    register_op('one_hot', one_hot, '', opset)
    register_op('im2col', im2col, '', opset)
    register_op('topk', topk, '', opset)
    register_op('softmax', softmax, '', opset)
    register_op('constant_pad_nd', constant_pad_nd, '', opset)
    register_op('reflection_pad1d', reflection_pad1d, '', opset)
    register_op('reflection_pad2d', reflection_pad2d, '', opset)
    register_op('reflection_pad3d', reflection_pad3d, '', opset)
    register_op('avg_pool1d', avg_pool1d, '', opset)
    register_op('avg_pool2d', avg_pool2d, '', opset)
    register_op('avg_pool3d', avg_pool3d, '', opset)
    register_op('adaptive_avg_pool1d', adaptive_avg_pool1d, '', opset)
    register_op('adaptive_avg_pool2d', adaptive_avg_pool2d, '', opset)
    register_op('adaptive_avg_pool3d', adaptive_avg_pool3d, '', opset)
    register_op('masked_select', masked_select, '', opset)
    register_op('upsample_nearest1d', upsample_nearest1d, '', opset)
    register_op('upsample_nearest2d', upsample_nearest2d, '', opset)
    register_op('upsample_nearest3d', upsample_nearest3d, '', opset)
    register_op('upsample_linear1d', upsample_linear1d, '', opset)
    register_op('upsample_bilinear2d', upsample_bilinear2d, '', opset)
    register_op('upsample_trilinear3d', upsample_trilinear3d, '', opset)
    register_op('upsample_bicubic2d', upsample_bicubic2d, '', opset)
    register_op('new_full', new_full, '', opset)
    register_op('grid_sampler', grid_sampler, '', opset)
    register_op('cummax', cummax, '', opset)
    register_op('cummin', cummin, '', opset)
    register_op('roll', roll, '', opset)
|
class ActiveRotatedFilterFunction(Function):
    """Encoding the orientation information and generating orientation-
    sensitive features.

    The details are described in the paper `Align Deep Features for Oriented
    Object Detection <https://arxiv.org/abs/2008.09397>_`.
    """

    @staticmethod
    def forward(ctx, input, indices):
        """
        Args:
            input (torch.Tensor): Input features with shape
                [num_output_planes, num_input_planes, num_orientations, H, W].
            indices (torch.Tensor): Indices with shape
                [num_orientations, H, W, num_rotations].

        Returns:
            torch.Tensor: Refined features with shape [num_output_planes *
            num_rotations, num_input_planes * num_orientations, H, W].
        """
        ctx.save_for_backward(input, indices)
        (op, ip, o, h, w) = input.size()
        # NOTE: o, h, w are deliberately re-bound here; the sizes used below
        # come from ``indices``.
        (o, h, w, r) = indices.size()
        output = input.new_zeros(((op * r), (ip * o), h, w))
        # Actual rotation/gather work happens in the compiled extension.
        ext_module.active_rotated_filter_forward(input, indices, output)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): Gradient of the output features with
                shape [num_output_planes * num_rotations,
                num_input_planes * num_orientations, H, W].

        Returns:
            tuple: Gradient of the input features (shape
            [num_output_planes, num_input_planes, num_orientations, H, W])
            and None for ``indices``, which has no gradient.
        """
        (input, indices) = ctx.saved_tensors
        grad_in = torch.zeros_like(input)
        ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)
        return (grad_in, None)
|
class AssignScoreWithK(Function):
    """Perform weighted sum to generate output features according to scores.

    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/paconv_lib/src/gpu>`_.

    This is a memory-efficient CUDA implementation of assign_scores
    operation, which first transform all point features with weight bank,
    then assemble neighbor features with ``knn_idx`` and perform weighted
    sum of ``scores``.

    See the `paper <https://arxiv.org/pdf/2103.14635.pdf>`_ appendix Sec. D
    for more detailed descriptions.

    Note:
        This implementation assumes using ``neighbor`` kernel input, which
        is (point_features - center_features, point_features). See
        https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
        pointnet2/paconv.py#L128 for more details.
    """

    @staticmethod
    def forward(ctx, scores, point_features, center_features, knn_idx, aggregate='sum'):
        """
        Args:
            scores (torch.Tensor): (B, npoint, K, M), predicted scores to
                aggregate weight matrices in the weight bank.
                ``npoint`` is the number of sampled centers.
                ``K`` is the number of queried neighbors.
                ``M`` is the number of weight matrices in the weight bank.
            point_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed point features to be aggregated.
            center_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed center features to be aggregated.
            knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
                We assume the first idx in each row is the idx of the center.
            aggregate (str, optional): Aggregation method.
                Can be 'sum', 'avg' or 'max'. Defaults: 'sum'.

        Returns:
            torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
        """
        # Map the aggregation name onto the extension's integer enum.
        agg = {'sum': 0, 'avg': 1, 'max': 2}
        (B, N, M, out_dim) = point_features.size()
        (_, npoint, K, _) = scores.size()
        output = point_features.new_zeros((B, out_dim, npoint, K))
        ext_module.assign_score_withk_forward(point_features.contiguous(), center_features.contiguous(), scores.contiguous(), knn_idx.contiguous(), output, B=B, N0=N, N1=npoint, M=M, K=K, O=out_dim, aggregate=agg[aggregate])
        ctx.save_for_backward(output, point_features, center_features, scores, knn_idx)
        ctx.agg = agg[aggregate]
        return output

    @staticmethod
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): (B, out_dim, npoint, K)

        Returns:
            tuple[torch.Tensor]: A tuple contains five elements. The first
            one is the gradient of ``scores`` whose shape is
            (B, npoint, K, M). The second is the gradient of
            ``point_features`` whose shape is (B, N, M, out_dim). The third
            is the gradient of ``center_features`` with the shape of
            (B, N, M, out_dim). The last two are ``None``.
        """
        (_, point_features, center_features, scores, knn_idx) = ctx.saved_tensors
        agg = ctx.agg
        (B, N, M, out_dim) = point_features.size()
        (_, npoint, K, _) = scores.size()
        grad_point_features = point_features.new_zeros(point_features.shape)
        grad_center_features = center_features.new_zeros(center_features.shape)
        grad_scores = scores.new_zeros(scores.shape)
        ext_module.assign_score_withk_backward(grad_out.contiguous(), point_features.contiguous(), center_features.contiguous(), scores.contiguous(), knn_idx.contiguous(), grad_point_features, grad_center_features, grad_scores, B=B, N0=N, N1=npoint, M=M, K=K, O=out_dim, aggregate=agg)
        # knn_idx and the aggregate mode get no gradients.
        return (grad_scores, grad_point_features, grad_center_features, None, None)
|
class BallQuery(Function):
    """Find nearby points in spherical space."""

    @staticmethod
    def forward(ctx, min_radius: float, max_radius: float, sample_num: int, xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
        """
        Args:
            min_radius (float): minimum radius of the balls.
            max_radius (float): maximum radius of the balls.
            sample_num (int): maximum number of features in the balls.
            xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            center_xyz (torch.Tensor): (B, npoint, 3) centers of the ball
                query.

        Returns:
            torch.Tensor: (B, npoint, nsample) tensor with the indices of
            the features that form the query balls.
        """
        assert center_xyz.is_contiguous()
        assert xyz.is_contiguous()
        assert (min_radius < max_radius)
        (B, N, _) = xyz.size()
        npoint = center_xyz.size(1)
        idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)
        ext_module.ball_query_forward(center_xyz, xyz, idx, b=B, n=N, m=npoint, min_radius=min_radius, max_radius=max_radius, nsample=sample_num)
        if (torch.__version__ != 'parrots'):
            # Index outputs carry no gradient.
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # No input receives a gradient; the op is non-differentiable.
        return (None, None, None, None)
|
def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
    """Calculate overlap between two set of bboxes.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    Args:
        bboxes1 (torch.Tensor): shape (m, 4) in <x1, y1, x2, y2> format or
            empty.
        bboxes2 (torch.Tensor): shape (n, 4) in <x1, y1, x2, y2> format or
            empty. If aligned is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union) or "iof" (intersection
            over foreground).
        aligned (bool): whether to only compute ious between aligned pairs.
        offset (int): offset added to box widths/heights, must be 0 or 1.

    Returns:
        torch.Tensor: Return the ious betweens boxes. If ``aligned`` is
        ``False``, the shape of ious is (m, n) else (m, 1).

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> bbox_overlaps(bboxes1, bboxes2)
        tensor([[0.5000, 0.0000, 0.0000],
                [0.0000, 0.0000, 1.0000],
                [0.0000, 0.0000, 0.0000]])

    Example:
        >>> empty = torch.FloatTensor([])
        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    mode_dict = {'iou': 0, 'iof': 1}
    assert mode in mode_dict
    mode_flag = mode_dict[mode]
    # Either a proper (k, 4) box tensor or an empty tensor is accepted.
    assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
    assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
    assert offset == 1 or offset == 0

    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if aligned:
        assert rows == cols
    if rows * cols == 0:
        # Empty input: return an empty result of the right shape, allocated
        # from bboxes1 to preserve dtype/device.
        return bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols)

    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        ious = bboxes1.new_zeros((rows, cols))
    ext_module.bbox_overlaps(
        bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned,
        offset=offset)
    return ious
|
class BorderAlignFunction(Function):

    @staticmethod
    def symbolic(g, input, boxes, pool_size):
        # ONNX export hook: map to the custom mmcv operator.
        return g.op(
            'mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)

    @staticmethod
    def forward(ctx, input, boxes, pool_size):
        ctx.pool_size = pool_size
        ctx.input_shape = input.size()

        assert boxes.ndim == 3, 'boxes must be with shape [B, H*W, 4]'
        assert boxes.size(2) == 4, \
            'the last dimension of boxes must be (x1, y1, x2, y2)'
        assert input.size(1) % 4 == 0, \
            'the channel for input feature must be divisible by factor 4'

        # [B, C/4, H*W, 4]: one pooled value per border per box.
        output_shape = (input.size(0), input.size(1) // 4, boxes.size(1), 4)
        output = input.new_zeros(output_shape)
        # Positions of the max-pooled samples, reused in backward.
        argmax_idx = input.new_zeros(output_shape).to(torch.int)

        ext_module.border_align_forward(
            input, boxes, output, argmax_idx, pool_size=ctx.pool_size)

        ctx.save_for_backward(boxes, argmax_idx)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        boxes, argmax_idx = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        # The extension op requires a contiguous gradient tensor.
        grad_output = grad_output.contiguous()
        ext_module.border_align_backward(
            grad_output, boxes, argmax_idx, grad_input,
            pool_size=ctx.pool_size)
        return (grad_input, None, None)
|
class BorderAlign(nn.Module):
    """Border align pooling layer.

    Applies border_align over the input feature based on predicted bboxes.
    The details were described in the paper
    `BorderDet: Border Feature for Dense Object Detection
    <https://arxiv.org/abs/2007.11056>`_.

    For each border line (e.g. top, left, bottom or right) of each box,
    border_align does the following:

    1. uniformly samples ``pool_size`` +1 positions on this line, involving
       the start and end points.
    2. the corresponding features on these points are computed by bilinear
       interpolation.
    3. max pooling over all the ``pool_size`` +1 positions are used for
       computing pooled feature.

    Args:
        pool_size (int): number of positions sampled over the boxes' borders
            (e.g. top, bottom, left, right).
    """

    def __init__(self, pool_size):
        super().__init__()
        self.pool_size = pool_size

    def forward(self, input, boxes):
        """
        Args:
            input: Features with shape [N,4C,H,W]. Channels ranged in [0,C),
                [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom,
                right features respectively.
            boxes: Boxes with shape [N,H*W,4]. Coordinate format
                (x1,y1,x2,y2).

        Returns:
            torch.Tensor: Pooled features with shape [N,C,H*W,4]. The order
            is (top,left,bottom,right) for the last dimension.
        """
        return border_align(input, boxes, self.pool_size)

    def __repr__(self):
        return f'{self.__class__.__name__}(pool_size={self.pool_size})'
|
def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False,
                    clockwise=True):
    """Return intersection-over-union (Jaccard index) of rotated boxes.

    Both sets of boxes are expected to be in
    (x_center, y_center, width, height, angle) format, with the angle given
    in radian.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    .. note::
        The operator assumes:

        1) The positive direction along x axis is left -> right.

        2) The positive direction along y axis is top -> down.

        3) The w border is in parallel with x axis when angle = 0.

        There are 2 opposite definitions of the positive angular direction,
        clockwise (CW) and counter-clockwise (CCW). MMCV supports both
        definitions and uses CW by default. Please set ``clockwise=False``
        if you are using the CCW definition; in that case the angle column
        of both inputs is negated before calling the extension op so the
        kernel always works in the CW convention.

    Args:
        bboxes1 (torch.Tensor): rotated bboxes 1. It has shape (N, 5),
            indicating (x, y, w, h, theta) for each row. Note that theta is
            in radian.
        bboxes2 (torch.Tensor): rotated bboxes 2. It has shape (M, 5),
            indicating (x, y, w, h, theta) for each row. Note that theta is
            in radian.
        mode (str): "iou" (intersection over union) or iof (intersection
            over foreground).
        aligned (bool): whether to only compute ious between aligned pairs.
        clockwise (bool): flag indicating whether the positive angular
            orientation is clockwise. default True.
            `New in version 1.4.3.`

    Returns:
        torch.Tensor: Return the ious betweens boxes. If ``aligned`` is
        ``False``, the shape of ious is (N, M) else (N,).
    """
    assert mode in ['iou', 'iof']
    mode_flag = {'iou': 0, 'iof': 1}[mode]
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    # The extension op writes into a flat buffer; the non-aligned result is
    # reshaped to (rows, cols) afterwards.
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        ious = bboxes1.new_zeros(rows * cols)
    if not clockwise:
        # Negate the angle column to convert CCW boxes to the CW convention
        # expected by the kernel.
        flip_mat = bboxes1.new_ones(bboxes1.shape[-1])
        flip_mat[-1] = -1
        bboxes1 = bboxes1 * flip_mat
        bboxes2 = bboxes2 * flip_mat
    ext_module.box_iou_rotated(
        bboxes1.contiguous(), bboxes2.contiguous(), ious,
        mode_flag=mode_flag, aligned=aligned)
    if not aligned:
        ious = ious.view(rows, cols)
    return ious
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.