code stringlengths 17 6.64M |
|---|
def rotated_feature_align(features, best_rbboxes, spatial_scale=(1 / 8), points=1):
    """Align features according to the best matched rotated bboxes.

    Thin functional wrapper around ``RotatedFeatureAlignFunction``.
    """
    aligned = RotatedFeatureAlignFunction.apply(features, best_rbboxes,
                                                spatial_scale, points)
    return aligned
|
@CONV_LAYERS.register_module(name='SAC')
class SAConv2d(ConvAWS2d):
    """SAC (Switchable Atrous Convolution)

    This is an implementation of `DetectoRS: Detecting Objects with Recursive
    Feature Pyramid and Switchable Atrous Convolution
    <https://arxiv.org/abs/2006.02334>`_.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
        use_deform: If ``True``, replace convolution with deformable
            convolution. Default: ``False``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, use_deform=False):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.use_deform = use_deform
        # 1x1 conv producing the per-location soft switch between the
        # small-dilation and large-dilation branches.
        self.switch = nn.Conv2d(self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
        # Learnable weight residual used by the large-dilation branch.
        self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
        # Global-context 1x1 convs applied before and after the switchable conv.
        self.pre_context = nn.Conv2d(self.in_channels, self.in_channels, kernel_size=1, bias=True)
        self.post_context = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=1, bias=True)
        if self.use_deform:
            # 18 = 2 offsets per sampling location of a 3x3 kernel.
            self.offset_s = nn.Conv2d(self.in_channels, 18, kernel_size=3, padding=1, stride=stride, bias=True)
            self.offset_l = nn.Conv2d(self.in_channels, 18, kernel_size=3, padding=1, stride=stride, bias=True)
        self.init_weights()

    def init_weights(self):
        """Zero-initialize all auxiliary layers; the switch bias starts at 1."""
        constant_init(self.switch, 0, bias=1)
        self.weight_diff.data.zero_()
        constant_init(self.pre_context, 0)
        constant_init(self.post_context, 0)
        if self.use_deform:
            constant_init(self.offset_s, 0)
            constant_init(self.offset_l, 0)

    def forward(self, x):
        # Pre-context: add a globally-pooled context vector to the input.
        avg_x = F.adaptive_avg_pool2d(x, output_size=1)
        avg_x = self.pre_context(avg_x)
        avg_x = avg_x.expand_as(x)
        x = (x + avg_x)
        # Compute the switch map from a reflect-padded 5x5 average of x.
        avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
        avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
        switch = self.switch(avg_x)
        # Small-dilation branch; weight transformed by the parent
        # (presumably ConvAWS2d weight standardization -- confirm upstream).
        weight = self._get_weight(self.weight)
        zero_bias = torch.zeros(self.out_channels, device=weight.device, dtype=weight.dtype)
        if self.use_deform:
            offset = self.offset_s(avg_x)
            out_s = deform_conv2d(x, offset, weight, self.stride, self.padding, self.dilation, self.groups, 1)
        elif ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.5.0'))):
            # torch < 1.5 (and parrots) expose ``conv2d_forward``.
            out_s = super().conv2d_forward(x, weight)
        elif (digit_version(TORCH_VERSION) >= digit_version('1.8.0')):
            # torch >= 1.8 ``_conv_forward`` requires an explicit bias argument.
            out_s = super()._conv_forward(x, weight, zero_bias)
        else:
            out_s = super()._conv_forward(x, weight)
        # Large-dilation branch: temporarily triple padding/dilation and add
        # the learned weight residual.
        ori_p = self.padding
        ori_d = self.dilation
        self.padding = tuple(((3 * p) for p in self.padding))
        self.dilation = tuple(((3 * d) for d in self.dilation))
        weight = (weight + self.weight_diff)
        if self.use_deform:
            offset = self.offset_l(avg_x)
            out_l = deform_conv2d(x, offset, weight, self.stride, self.padding, self.dilation, self.groups, 1)
        elif ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.5.0'))):
            out_l = super().conv2d_forward(x, weight)
        elif (digit_version(TORCH_VERSION) >= digit_version('1.8.0')):
            out_l = super()._conv_forward(x, weight, zero_bias)
        else:
            out_l = super()._conv_forward(x, weight)
        # Per-location soft blend of the two branches.
        out = ((switch * out_s) + ((1 - switch) * out_l))
        # Restore the padding/dilation mutated for the large branch.
        self.padding = ori_p
        self.dilation = ori_d
        # Post-context: add a globally-pooled context vector to the output.
        avg_x = F.adaptive_avg_pool2d(out, output_size=1)
        avg_x = self.post_context(avg_x)
        avg_x = avg_x.expand_as(out)
        out = (out + avg_x)
        return out
|
def _calculate_fan_in_and_fan_out_hwio(tensor):
dimensions = tensor.ndimension()
if (dimensions < 2):
raise ValueError('fan in and fan out can not be computed for tensorwith fewer than 2 dimensions')
if (dimensions == 2):
fan_in = tensor.size((- 2))
fan_out = tensor.size((- 1))
else:
num_input_fmaps = tensor.size((- 2))
num_output_fmaps = tensor.size((- 1))
receptive_field_size = 1
if (tensor.dim() > 2):
receptive_field_size = tensor[(..., 0, 0)].numel()
fan_in = (num_input_fmaps * receptive_field_size)
fan_out = (num_output_fmaps * receptive_field_size)
return (fan_in, fan_out)
|
class SparseConvolution(SparseModule):
    """Base sparse convolution over ``ndim`` spatial dimensions.

    Dispatches in ``forward`` between submanifold (``subm``), inverse,
    transposed, regular, 1x1 fast-path and fused-BN sparse convolutions.
    Concrete layers (``SparseConv2d`` etc.) only fix ``ndim`` and flags.
    """

    def __init__(self, ndim, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=True, subm=False, output_padding=0, transposed=False, inverse=False, indice_key=None, fused_bn=False):
        super(SparseConvolution, self).__init__()
        # Grouped sparse convolution is not supported.
        assert (groups == 1)
        # Broadcast scalar hyper-parameters to one value per spatial dim.
        if (not isinstance(kernel_size, (list, tuple))):
            kernel_size = ([kernel_size] * ndim)
        if (not isinstance(stride, (list, tuple))):
            stride = ([stride] * ndim)
        if (not isinstance(padding, (list, tuple))):
            padding = ([padding] * ndim)
        if (not isinstance(dilation, (list, tuple))):
            dilation = ([dilation] * ndim)
        if (not isinstance(output_padding, (list, tuple))):
            output_padding = ([output_padding] * ndim)
        # Simultaneous stride > 1 and dilation > 1 is unsupported.
        for (d, s) in zip(dilation, stride):
            assert any([(s == 1), (d == 1)]), "don't support this."
        self.ndim = ndim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # All-ones kernel triggers the dense-matmul fast path in forward().
        self.conv1x1 = (np.prod(kernel_size) == 1)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.inverse = inverse
        self.output_padding = output_padding
        self.groups = groups
        self.subm = subm
        self.indice_key = indice_key
        self.fused_bn = fused_bn
        # Weight layout: (*kernel_size, in_channels, out_channels).
        self.weight = Parameter(torch.Tensor(*kernel_size, in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-uniform weight init; bias uniform in +-1/sqrt(fan_in)."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if (self.bias is not None):
            (fan_in, _) = _calculate_fan_in_and_fan_out_hwio(self.weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(self.bias, (- bound), bound)

    def forward(self, input):
        """Apply the sparse convolution to a ``SparseConvTensor``."""
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Submanifold conv keeps the input spatial shape; otherwise derive
        # the (de)convolution output shape.
        if (not self.subm):
            if self.transposed:
                out_spatial_shape = ops.get_deconv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding)
            else:
                out_spatial_shape = ops.get_conv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation)
        else:
            out_spatial_shape = spatial_shape
        if self.conv1x1:
            # 1x1 fast path: plain dense matmul on the feature matrix;
            # the sparsity pattern (indices) is unchanged.
            features = torch.mm(input.features, self.weight.view(self.in_channels, self.out_channels))
            if (self.bias is not None):
                features += self.bias
            out_tensor = SparseConvTensor(features, input.indices, input.spatial_shape, input.batch_size)
            out_tensor.indice_dict = input.indice_dict
            out_tensor.grid = input.grid
            return out_tensor
        # Reuse indice pairs cached under ``indice_key`` when available.
        data = input.find_indice_pair(self.indice_key)
        if self.inverse:
            # Inverse conv must reuse the pairs built by its coupled conv;
            # its output shape is the coupled conv's input spatial shape.
            assert ((data is not None) and (self.indice_key is not None))
            (_, outids, indice_pairs, indice_pair_num, out_spatial_shape) = data
            assert (indice_pairs.shape[0] == np.prod(self.kernel_size)), 'inverse conv must have same kernel size as its couple conv'
        elif ((self.indice_key is not None) and (data is not None)):
            (outids, _, indice_pairs, indice_pair_num, _) = data
        else:
            # Build the indice pairs and cache them for downstream layers.
            (outids, indice_pairs, indice_pair_num) = ops.get_indice_pairs(indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding, self.subm, self.transposed, grid=input.grid)
            input.indice_dict[self.indice_key] = (outids, indices, indice_pairs, indice_pair_num, spatial_shape)
        if self.fused_bn:
            # Fused conv+bias kernel; bias is mandatory here.
            assert (self.bias is not None)
            out_features = ops.fused_indice_conv(features, self.weight, self.bias, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.inverse, self.subm)
        else:
            if self.subm:
                out_features = Fsp.indice_subm_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            elif self.inverse:
                out_features = Fsp.indice_inverse_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            else:
                out_features = Fsp.indice_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            if (self.bias is not None):
                out_features += self.bias
        out_tensor = SparseConvTensor(out_features, outids, out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
|
@CONV_LAYERS.register_module()
class SparseConv2d(SparseConvolution):
    """2D sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(2, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConv3d(SparseConvolution):
    """3D sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(3, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConv4d(SparseConvolution):
    """4D sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(4, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConvTranspose2d(SparseConvolution):
    """2D transposed sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(2, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, transposed=True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConvTranspose3d(SparseConvolution):
    """3D transposed sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(3, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, transposed=True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv2d(SparseConvolution):
    """2D inverse sparse convolution layer (reuses a coupled conv's pairs)."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 indice_key=None, bias=True):
        super().__init__(2, in_channels, out_channels, kernel_size,
                         bias=bias, inverse=True, indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv3d(SparseConvolution):
    """3D inverse sparse convolution layer (reuses a coupled conv's pairs)."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 indice_key=None, bias=True):
        super().__init__(3, in_channels, out_channels, kernel_size,
                         bias=bias, inverse=True, indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv2d(SparseConvolution):
    """2D submanifold sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(2, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv3d(SparseConvolution):
    """3D submanifold sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(3, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv4d(SparseConvolution):
    """4D submanifold sparse convolution layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(4, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, True,
                         indice_key=indice_key)
|
class SparseConvFunction(Function):
    """Sparse convolution autograd function.

    Please refer to `SECOND <https://www.mdpi.com/1424-8220/18/10/3337>`_ for
    more details.
    """

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Gather-gemm-scatter forward.

        Args:
            features (torch.Tensor): Features that needs to convolute.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between inputs locations
                and outputs locations.
            indice_pair_num (torch.Tensor): Indice pairs num.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out, False)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        grad_input, grad_filters = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            False)
        # No gradients for the indice-pair bookkeeping arguments.
        return grad_input, grad_filters, None, None, None
|
class SparseInverseConvFunction(Function):
    """Inverse sparse convolution autograd function."""

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Inverse gather-gemm-scatter forward.

        Args:
            features (torch.Tensor): Features that needs to convolute.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between inputs locations
                and outputs locations.
            indice_pair_num (torch.Tensor): Indice pairs num.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out, True, False)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        grad_input, grad_filters = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            True, False)
        # No gradients for the indice-pair bookkeeping arguments.
        return grad_input, grad_filters, None, None, None
|
class SubMConvFunction(Function):
    """Submanifold sparse convolution autograd function."""

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Submanifold gather-gemm-scatter forward.

        Args:
            features (torch.Tensor): Features that needs to convolute.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between inputs locations
                and outputs locations.
            indice_pair_num (torch.Tensor): Indice pairs num.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out, False, True)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        grad_input, grad_filters = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            False, True)
        # No gradients for the indice-pair bookkeeping arguments.
        return grad_input, grad_filters, None, None, None
|
class SparseMaxPoolFunction(Function):
    """Sparse max pooling autograd function."""

    @staticmethod
    def forward(ctx, features, indice_pairs, indice_pair_num,
                num_activate_out):
        """Sparse max pooling forward.

        Args:
            features (torch.Tensor): Features that needs to convolute.
            indice_pairs (torch.Tensor): Indice pairs between inputs locations
                and outputs locations.
            indice_pair_num (torch.Tensor): Indice pairs num.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from sparse maxpooling.
        """
        pooled = ops.indice_maxpool(features, indice_pairs, indice_pair_num,
                                    num_activate_out)
        # The pooled output is saved too: the backward kernel needs it to
        # identify which input contributed each max.
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, pooled)
        return pooled

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, pooled = ctx.saved_tensors
        grad_input = ops.indice_maxpool_backward(features, pooled, grad_output,
                                                 indice_pairs, indice_pair_num)
        return grad_input, None, None, None
|
def is_spconv_module(module):
    """Return True if ``module`` is a SparseModule (consumes sparse tensors
    inside ``SparseSequential``)."""
    return isinstance(module, (SparseModule,))
|
def is_sparse_conv(module):
    """Return True if ``module`` is a ``SparseConvolution`` instance."""
    # Imported locally to avoid a circular import with .sparse_conv.
    from .sparse_conv import SparseConvolution
    return isinstance(module, SparseConvolution)
|
def _mean_update(vals, m_vals, t):
outputs = []
if (not isinstance(vals, list)):
vals = [vals]
if (not isinstance(m_vals, list)):
m_vals = [m_vals]
for (val, m_val) in zip(vals, m_vals):
output = (((t / float((t + 1))) * m_val) + ((1 / float((t + 1))) * val))
outputs.append(output)
if (len(outputs) == 1):
outputs = outputs[0]
return outputs
|
class SparseModule(nn.Module):
    """Placeholder base class.

    Every module subclassing this is fed a SparseConvTensor when used
    inside ``SparseSequential``.
    """
    pass
|
class SparseSequential(SparseModule):
    """A sequential container.

    Modules will be added to it in the order they are passed in the
    constructor. Alternatively, an ordered dict of modules can also be
    passed in.

    To make it easier to understand, given is a small example::

        Example:
        >>> # using Sequential:
        >>> from mmcv.ops import SparseSequential
        >>> model = SparseSequential(
                    SparseConv2d(1,20,5),
                    nn.ReLU(),
                    SparseConv2d(20,64,5),
                    nn.ReLU()
                    )

        >>> # using Sequential with OrderedDict
        >>> model = SparseSequential(OrderedDict([
                      ('conv1', SparseConv2d(1,20,5)),
                      ('relu1', nn.ReLU()),
                      ('conv2', SparseConv2d(20,64,5)),
                      ('relu2', nn.ReLU())
                    ]))

        >>> # using Sequential with kwargs(python 3.6+)
        >>> model = SparseSequential(
                      conv1=SparseConv2d(1,20,5),
                      relu1=nn.ReLU(),
                      conv2=SparseConv2d(20,64,5),
                      relu2=nn.ReLU()
                    )
    """

    def __init__(self, *args, **kwargs):
        super(SparseSequential, self).__init__()
        # A single OrderedDict argument supplies named modules; otherwise
        # positional modules are indexed by their position.
        if ((len(args) == 1) and isinstance(args[0], OrderedDict)):
            for (key, module) in args[0].items():
                self.add_module(key, module)
        else:
            for (idx, module) in enumerate(args):
                self.add_module(str(idx), module)
        for (name, module) in kwargs.items():
            if (sys.version_info < (3, 6)):
                raise ValueError('kwargs only supported in py36+')
            if (name in self._modules):
                raise ValueError('name exists.')
            self.add_module(name, module)
        # Per-submodule sparsity values recorded during forward().
        self._sparity_dict = {}

    def __getitem__(self, idx):
        # Supports negative indices like a list.
        if (not ((- len(self)) <= idx < len(self))):
            raise IndexError('index {} is out of range'.format(idx))
        if (idx < 0):
            idx += len(self)
        it = iter(self._modules.values())
        for i in range(idx):
            next(it)
        return next(it)

    def __len__(self):
        return len(self._modules)

    @property
    def sparity_dict(self):
        # Sparsity recorded for each spconv submodule during the last forward.
        return self._sparity_dict

    def add(self, module, name=None):
        """Append ``module``, auto-naming it by position when ``name`` is None."""
        if (name is None):
            name = str(len(self._modules))
        if (name in self._modules):
            raise KeyError('name exists')
        self.add_module(name, module)

    def forward(self, input):
        """Run submodules in order, bridging sparse and dense modules.

        Sparse (spconv) modules receive the SparseConvTensor directly;
        dense modules are applied to ``input.features`` when the input is
        sparse (skipped entirely when there are no active indices).
        """
        for (k, module) in self._modules.items():
            if is_spconv_module(module):
                assert isinstance(input, SparseConvTensor)
                self._sparity_dict[k] = input.sparity
                input = module(input)
            elif isinstance(input, SparseConvTensor):
                if (input.indices.shape[0] != 0):
                    input.features = module(input.features)
            else:
                input = module(input)
        return input

    def fused(self):
        """Return a copy with each (SparseConvolution, BatchNorm1d) pair
        folded into a single fused-BN convolution."""
        from .sparse_conv import SparseConvolution
        mods = [v for (k, v) in self._modules.items()]
        fused_mods = []
        idx = 0
        while (idx < len(mods)):
            if is_sparse_conv(mods[idx]):
                if ((idx < (len(mods) - 1)) and isinstance(mods[(idx + 1)], nn.BatchNorm1d)):
                    # Rebuild the conv with fused_bn=True and bias enabled,
                    # then fold the BN statistics into weight and bias.
                    new_module = SparseConvolution(ndim=mods[idx].ndim, in_channels=mods[idx].in_channels, out_channels=mods[idx].out_channels, kernel_size=mods[idx].kernel_size, stride=mods[idx].stride, padding=mods[idx].padding, dilation=mods[idx].dilation, groups=mods[idx].groups, bias=True, subm=mods[idx].subm, output_padding=mods[idx].output_padding, transposed=mods[idx].transposed, inverse=mods[idx].inverse, indice_key=mods[idx].indice_key, fused_bn=True)
                    new_module.load_state_dict(mods[idx].state_dict(), False)
                    new_module.to(mods[idx].weight.device)
                    conv = new_module
                    bn = mods[(idx + 1)]
                    conv.bias.data.zero_()
                    # NOTE(review): divides by sqrt(running_var) + eps; the
                    # textbook BN fusion uses sqrt(running_var + eps) —
                    # confirm against the reference spconv implementation.
                    conv.weight.data[:] = ((conv.weight.data * bn.weight.data) / (torch.sqrt(bn.running_var) + bn.eps))
                    conv.bias.data[:] = ((((conv.bias.data - bn.running_mean) * bn.weight.data) / (torch.sqrt(bn.running_var) + bn.eps)) + bn.bias.data)
                    fused_mods.append(conv)
                    idx += 2
                else:
                    fused_mods.append(mods[idx])
                    idx += 1
            else:
                fused_mods.append(mods[idx])
                idx += 1
        return SparseSequential(*fused_mods)
|
class ToDense(SparseModule):
    """Convert a SparseConvTensor to an NCHW dense tensor."""

    def forward(self, x: SparseConvTensor):
        return x.dense()
|
class RemoveGrid(SparseModule):
    """Remove the pre-allocated grid buffer from a SparseConvTensor."""

    def forward(self, x: SparseConvTensor):
        x.grid = None
        return x
|
def get_conv_output_size(input_size, kernel_size, stride, padding, dilation):
    """Per-dimension output sizes of a sparse convolution.

    A kernel size of ``-1`` marks a "global" dimension whose output
    size is fixed to 1.
    """
    output_size = []
    for in_sz, k, s, p, d in zip(input_size, kernel_size, stride, padding,
                                 dilation):
        if k == -1:
            output_size.append(1)
        else:
            output_size.append((in_sz + 2 * p - d * (k - 1) - 1) // s + 1)
    return output_size
|
def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation,
                           output_padding):
    """Per-dimension output sizes of a sparse transposed convolution.

    ``dilation`` is accepted but does not enter the size formula,
    mirroring the reference implementation.

    Raises:
        ValueError: If any kernel size is ``-1`` (global dims unsupported).
    """
    output_size = []
    for in_sz, k, s, p, op in zip(input_size, kernel_size, stride, padding,
                                  output_padding):
        if k == -1:
            raise ValueError("deconv don't support kernel_size < 0")
        output_size.append((in_sz - 1) * s - 2 * p + k + op)
    return output_size
|
def get_indice_pairs(indices, batch_size, spatial_shape, ksize=3, stride=1, padding=0, dilation=1, out_padding=0, subm=False, transpose=False, grid=None):
    """Build input/output indice pairs for a sparse convolution.

    Scalar hyper-parameters are broadcast to one value per spatial
    dimension, the output spatial shape is derived (kept as-is for
    submanifold convs), and the work is dispatched to the
    dimension-specific extension kernel.
    """
    ndim = indices.shape[1] - 1

    def _expand(value):
        # Broadcast a scalar hyper-parameter to ``ndim`` entries.
        return value if isinstance(value, (list, tuple)) else [value] * ndim

    ksize = _expand(ksize)
    stride = _expand(stride)
    padding = _expand(padding)
    dilation = _expand(dilation)
    out_padding = _expand(out_padding)
    # Simultaneous stride > 1 and dilation > 1 is unsupported.
    for d, s in zip(dilation, stride):
        assert any([s == 1, d == 1]), "don't support this."
    if subm:
        out_shape = spatial_shape
    elif transpose:
        out_shape = get_deconv_output_size(spatial_shape, ksize, stride,
                                           padding, dilation, out_padding)
    else:
        out_shape = get_conv_output_size(spatial_shape, ksize, stride,
                                         padding, dilation)
    if grid is None:
        if ndim not in (2, 3, 4):
            raise NotImplementedError
        pair_func = getattr(ext_module, f'get_indice_pairs_{ndim}d_forward')
        return pair_func(indices, batch_size, out_shape, spatial_shape, ksize,
                         stride, padding, dilation, out_padding, int(subm),
                         int(transpose))
    # Pre-allocated grid buffer: use the *_backward kernels.
    if ndim not in (2, 3):
        raise NotImplementedError
    pair_func = getattr(ext_module, f'get_indice_pairs_{ndim}d_backward')
    return pair_func(indices, grid, batch_size, out_shape, spatial_shape,
                     ksize, stride, padding, dilation, out_padding, int(subm),
                     int(transpose))
|
def indice_conv(features, filters, indice_pairs, indice_pair_num,
                num_activate_out, inverse=False, subm=False):
    """Run the gather-gemm-scatter sparse convolution kernel.

    Raises:
        NotImplementedError: If ``filters`` is neither float32 nor half.
    """
    if filters.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_conv_forward(features, filters, indice_pairs,
                                          indice_pair_num, num_activate_out,
                                          int(inverse), int(subm))
|
def fused_indice_conv(features, filters, bias, indice_pairs, indice_pair_num,
                      num_activate_out, inverse, subm):
    """Run the fused sparse convolution + bias kernel.

    Raises:
        NotImplementedError: If neither ``features`` is half precision nor
            ``filters`` is float32.
    """
    # Fix: the original read the non-existent attribute ``filters.dtypes``,
    # raising AttributeError whenever ``features`` was not half precision;
    # the intended check is ``filters.dtype``.
    if (features.dtype == torch.half) or (filters.dtype == torch.float32):
        func = ext_module.fused_indice_conv_forward
    else:
        raise NotImplementedError
    return func(features, filters, bias, indice_pairs, indice_pair_num,
                num_activate_out, int(inverse), int(subm))
|
def indice_conv_backward(features, filters, out_bp, indice_pairs,
                         indice_pair_num, inverse=False, subm=False):
    """Backward pass of the gather-gemm-scatter sparse convolution.

    Raises:
        NotImplementedError: If ``filters`` is neither float32 nor half.
    """
    if filters.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_conv_backward(features, filters, out_bp,
                                           indice_pairs, indice_pair_num,
                                           int(inverse), int(subm))
|
def indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out):
    """Run the sparse max-pooling forward kernel.

    Raises:
        NotImplementedError: If ``features`` is neither float32 nor half.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_forward(features, indice_pairs,
                                             indice_pair_num,
                                             num_activate_out)
|
def indice_maxpool_backward(features, out_features, out_bp, indice_pairs,
                            indice_pair_num):
    """Run the sparse max-pooling backward kernel.

    Raises:
        NotImplementedError: If ``features`` is neither float32 nor half.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_backward(features, out_features, out_bp,
                                              indice_pairs, indice_pair_num)
|
class SparseMaxPool(SparseModule):
    """Base sparse max pooling over ``ndim`` spatial dimensions."""

    def __init__(self, ndim, kernel_size, stride=1, padding=0, dilation=1, subm=False):
        super(SparseMaxPool, self).__init__()
        # Broadcast scalar hyper-parameters to one value per spatial dim.
        if (not isinstance(kernel_size, (list, tuple))):
            kernel_size = ([kernel_size] * ndim)
        if (not isinstance(stride, (list, tuple))):
            stride = ([stride] * ndim)
        if (not isinstance(padding, (list, tuple))):
            padding = ([padding] * ndim)
        if (not isinstance(dilation, (list, tuple))):
            dilation = ([dilation] * ndim)
        self.ndim = ndim
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.subm = subm
        self.dilation = dilation

    def forward(self, input):
        """Apply sparse max pooling to a ``SparseConvTensor``."""
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Submanifold pooling keeps the input spatial shape.
        if (not self.subm):
            out_spatial_shape = get_conv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation)
        else:
            out_spatial_shape = spatial_shape
        # out_padding is fixed to 0 for pooling.
        (outids, indice_pairs, indice_pairs_num) = get_indice_pairs(indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, 0, self.subm)
        out_features = indice_maxpool(features, indice_pairs.to(device), indice_pairs_num.to(device), outids.shape[0])
        out_tensor = SparseConvTensor(out_features, outids, out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
|
class SparseMaxPool2d(SparseMaxPool):
    """2D sparse max pooling layer."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        super().__init__(2, kernel_size, stride, padding, dilation)
|
class SparseMaxPool3d(SparseMaxPool):
    """3D sparse max pooling layer."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        super().__init__(3, kernel_size, stride, padding, dilation)
|
class SyncBatchNormFunction(Function):
    """Autograd function for cross-process synchronized batch normalization."""

    @staticmethod
    def symbolic(g, input, running_mean, running_var, weight, bias, momentum, eps, group, group_size, stats_mode):
        """ONNX symbolic export to the ``mmcv::MMCVSyncBatchNorm`` op."""
        return g.op('mmcv::MMCVSyncBatchNorm', input, running_mean, running_var, weight, bias, momentum_f=momentum, eps_f=eps, group_i=group, group_size_i=group_size, stats_mode=stats_mode)

    @staticmethod
    def forward(self, input, running_mean, running_var, weight, bias, momentum, eps, group, group_size, stats_mode):
        """Normalize ``input`` using statistics synchronized across ``group``.

        ``stats_mode='default'`` averages per-worker statistics with equal
        weight; ``stats_mode='N'`` weights them by each worker's batch size
        so that empty batches do not skew the result.
        """
        self.momentum = momentum
        self.eps = eps
        self.group = group
        self.group_size = group_size
        self.stats_mode = stats_mode
        assert isinstance(input, (torch.HalfTensor, torch.FloatTensor, torch.cuda.HalfTensor, torch.cuda.FloatTensor)), f'only support Half or Float Tensor, but {input.type()}'
        output = torch.zeros_like(input)
        # Work on a flattened (N, C, L) view; output3d aliases output.
        input3d = input.flatten(start_dim=2)
        output3d = output.view_as(input3d)
        num_channels = input3d.size(1)
        # Per-channel accumulators used by the extension kernels.
        mean = torch.zeros(num_channels, dtype=torch.float, device=input3d.device)
        var = torch.zeros(num_channels, dtype=torch.float, device=input3d.device)
        norm = torch.zeros_like(input3d, dtype=torch.float, device=input3d.device)
        std = torch.zeros(num_channels, dtype=torch.float, device=input3d.device)
        batch_size = input3d.size(0)
        if (batch_size > 0):
            ext_module.sync_bn_forward_mean(input3d, mean)
            # batch_flag marks whether this worker had a non-empty batch.
            batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype)
        else:
            batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype)
        # Pack mean and batch flag so one all_reduce synchronizes both.
        vec = torch.cat([mean, batch_flag])
        if (self.stats_mode == 'N'):
            # Weight the local mean by the local batch size.
            vec *= batch_size
        if (self.group_size > 1):
            dist.all_reduce(vec, group=self.group)
        # In 'N' mode total_batch is the summed batch size; in 'default'
        # mode it is the count of workers with non-empty batches.
        total_batch = vec[(- 1)].detach()
        mean = vec[:num_channels]
        if (self.stats_mode == 'default'):
            mean = (mean / self.group_size)
        elif (self.stats_mode == 'N'):
            mean = (mean / total_batch.clamp(min=1))
        else:
            raise NotImplementedError
        if (batch_size > 0):
            ext_module.sync_bn_forward_var(input3d, mean, var)
        if (self.stats_mode == 'N'):
            var *= batch_size
        if (self.group_size > 1):
            dist.all_reduce(var, group=self.group)
        if (self.stats_mode == 'default'):
            var /= self.group_size
        elif (self.stats_mode == 'N'):
            var /= total_batch.clamp(min=1)
        else:
            raise NotImplementedError
        # Zero momentum when no worker had data, so running stats are kept.
        update_flag = total_batch.clamp(max=1)
        momentum = (update_flag * self.momentum)
        ext_module.sync_bn_forward_output(input3d, mean, var, weight, bias, running_mean, running_var, norm, std, output3d, eps=self.eps, momentum=momentum, group_size=self.group_size)
        self.save_for_backward(norm, std, weight)
        return output

    @staticmethod
    @once_differentiable
    def backward(self, grad_output):
        (norm, std, weight) = self.saved_tensors
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(weight)
        grad_input = torch.zeros_like(grad_output)
        grad_output3d = grad_output.flatten(start_dim=2)
        grad_input3d = grad_input.view_as(grad_output3d)
        batch_size = grad_input3d.size(0)
        if (batch_size > 0):
            ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight, grad_bias)
        # Average the parameter gradients across the process group.
        if (self.group_size > 1):
            dist.all_reduce(grad_weight, group=self.group)
            dist.all_reduce(grad_bias, group=self.group)
            grad_weight /= self.group_size
            grad_bias /= self.group_size
        if (batch_size > 0):
            ext_module.sync_bn_backward_data(grad_output3d, weight, grad_weight, grad_bias, norm, std, grad_input3d)
        # Gradients only for input, weight and bias; the remaining forward
        # arguments are non-differentiable configuration.
        return (grad_input, None, None, grad_weight, grad_bias, None, None, None, None, None)
|
@NORM_LAYERS.register_module(name='MMSyncBN')
class SyncBatchNorm(Module):
    """Synchronized Batch Normalization.

    Args:
        num_features (int): number of features/channels in input tensor.
        eps (float, optional): a value added to the denominator for numerical
            stability. Defaults to 1e-5.
        momentum (float, optional): the value used for the running_mean and
            running_var computation. Defaults to 0.1.
        affine (bool, optional): whether to use learnable affine parameters.
            Defaults to True.
        track_running_stats (bool, optional): whether to track the running
            mean and variance during training. When set to False, this
            module does not track such statistics, and initializes statistics
            buffers ``running_mean`` and ``running_var`` as ``None``. When
            these buffers are ``None``, this module always uses batch
            statistics in both training and eval modes. Defaults to True.
        group (int, optional): synchronization of stats happen within
            each process group individually. By default it is synchronization
            across the whole world. Defaults to None.
        stats_mode (str, optional): The statistical mode. Available options
            are ``'default'`` and ``'N'``. Defaults to 'default'.
            When ``stats_mode=='default'``, it computes the overall statistics
            using those from each worker with equal weight, i.e., the
            statistics are synchronized and simply divided by ``group``. This
            mode will produce inaccurate statistics when empty tensors occur.
            When ``stats_mode=='N'``, it computes the overall statistics using
            the total number of batches in each worker ignoring the number of
            group, i.e., the statistics are synchronized and then divided by
            the total batch ``N``. This mode is beneficial when empty tensors
            occur during training, as it averages the total mean by the real
            number of batches.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, group=None, stats_mode='default'):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # Fall back to the global process group when none is given.
        group = dist.group.WORLD if group is None else group
        self.group = group
        self.group_size = dist.get_world_size(group)
        assert stats_mode in ['default', 'N'], \
            f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
        self.stats_mode = stats_mode
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long))
        else:
            # No stats tracking: batch statistics are always used instead.
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)
            self.register_buffer('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset running mean/var and the batch counter to initial values."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        """Reset running stats and re-initialize the affine parameters."""
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def forward(self, input):
        """Apply (synchronized) batch normalization to ``input``."""
        if input.dim() < 2:
            raise ValueError(
                f'expected at least 2D input, got {input.dim()}D input')
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:
                    # Use a cumulative moving average when momentum is None.
                    exponential_average_factor = \
                        1.0 / float(self.num_batches_tracked)
                else:
                    exponential_average_factor = self.momentum
        if self.training or not self.track_running_stats:
            # Cross-worker synchronized statistics via the custom autograd op.
            return SyncBatchNormFunction.apply(
                input, self.running_mean, self.running_var, self.weight,
                self.bias, exponential_average_factor, self.eps, self.group,
                self.group_size, self.stats_mode)
        else:
            # Eval mode with tracked stats: plain batch_norm using the
            # running statistics (training=False).
            return F.batch_norm(input, self.running_mean, self.running_var,
                                self.weight, self.bias, False,
                                exponential_average_factor, self.eps)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'({self.num_features}, '
        s += f'eps={self.eps}, '
        s += f'momentum={self.momentum}, '
        s += f'affine={self.affine}, '
        s += f'track_running_stats={self.track_running_stats}, '
        # Fix: add the missing space after the comma so every field in the
        # repr is consistently ', '-separated.
        s += f'group_size={self.group_size}, '
        s += f'stats_mode={self.stats_mode})'
        return s
|
class ThreeInterpolate(Function):
    """Performs weighted linear interpolation on 3 features.

    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
    for more details.
    """

    @staticmethod
    def forward(ctx, features: torch.Tensor, indices: torch.Tensor,
                weight: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): (B, C, M) Features descriptors to be
                interpolated.
            indices (torch.Tensor): (B, n, 3) indices of three nearest
                neighbor features for the target features.
            weight (torch.Tensor): (B, n, 3) weights of three nearest
                neighbor features for the target features.

        Returns:
            torch.Tensor: (B, C, N) tensor of the interpolated features.
        """
        # The CUDA kernel requires densely packed inputs.
        assert features.is_contiguous()
        assert indices.is_contiguous()
        assert weight.is_contiguous()
        num_batch, num_channels, num_source = features.size()
        num_target = indices.size(1)
        # Stash everything backward needs; indices/weight get no gradient.
        ctx.three_interpolate_for_backward = (indices, weight, num_source)
        output = torch.cuda.FloatTensor(num_batch, num_channels, num_target)
        ext_module.three_interpolate_forward(
            features, indices, weight, output,
            b=num_batch, c=num_channels, m=num_source, n=num_target)
        return output

    @staticmethod
    def backward(
        ctx, grad_out: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Args:
            grad_out (torch.Tensor): (B, C, N) tensor with gradients of
                outputs.

        Returns:
            torch.Tensor: (B, C, M) tensor with gradients of features.
        """
        indices, weight, num_source = ctx.three_interpolate_for_backward
        num_batch, num_channels, num_target = grad_out.size()
        grad_features = torch.cuda.FloatTensor(
            num_batch, num_channels, num_source).zero_()
        grad_out_data = grad_out.data.contiguous()
        ext_module.three_interpolate_backward(
            grad_out_data, indices, weight, grad_features.data,
            b=num_batch, c=num_channels, n=num_target, m=num_source)
        return grad_features, None, None
|
class ThreeNN(Function):
    """Find the top-3 nearest neighbors of the target set from the source
    set.

    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
    for more details.
    """

    @staticmethod
    def forward(ctx, target: torch.Tensor,
                source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            target (torch.Tensor): shape (B, N, 3), points set that needs to
                find the nearest neighbors.
            source (torch.Tensor): shape (B, M, 3), points set that is used
                to find the nearest neighbors of points in target set.

        Returns:
            torch.Tensor: shape (B, N, 3), L2 distance of each point in
            target set to their corresponding top three nearest neighbors.
        """
        # The CUDA kernel assumes contiguous layouts.
        target = target.contiguous()
        source = source.contiguous()
        num_batch, num_target, _ = target.size()
        num_source = source.size(1)
        # Kernel fills squared distances; sqrt is taken before returning.
        dist2 = torch.cuda.FloatTensor(num_batch, num_target, 3)
        idx = torch.cuda.IntTensor(num_batch, num_target, 3)
        ext_module.three_nn_forward(target, source, dist2, idx,
                                    b=num_batch, n=num_target, m=num_source)
        if torch.__version__ != 'parrots':
            # Integer indices carry no gradient.
            ctx.mark_non_differentiable(idx)
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Neither input point set receives gradients from this op.
        return None, None
|
class TINShiftFunction(Function):
    """Autograd function backing the temporal interlace shift op."""

    @staticmethod
    def forward(ctx, input, shift):
        num_channels = input.size(2)
        num_segments = shift.size(1)
        # Channels must split evenly (and non-trivially) across segments.
        if (num_channels % num_segments != 0
                or num_channels // num_segments <= 0):
            raise ValueError(
                f'C should be a multiple of num_segments, but got '
                f'C={num_channels} and num_segments={num_segments}.')
        ctx.save_for_backward(shift)
        out = torch.zeros_like(input)
        ext_module.tin_shift_forward(input, shift, out)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        (shift,) = ctx.saved_tensors
        data_grad_input = grad_output.new(*grad_output.size()).zero_()
        # Shift values are not differentiable; their grad stays zero.
        shift_grad_input = shift.new(*shift.size()).zero_()
        ext_module.tin_shift_backward(grad_output, shift, data_grad_input)
        return data_grad_input, shift_grad_input
|
class TINShift(nn.Module):
    """Temporal Interlace Shift.

    Temporal Interlace shift is a differentiable temporal-wise frame shifting
    which is proposed in "Temporal Interlacing Network".

    Please refer to `Temporal Interlacing Network
    <https://arxiv.org/abs/2001.06499>`_ for more details.

    Code is modified from
    https://github.com/mit-han-lab/temporal-shift-module
    """

    def forward(self, input, shift):
        """Perform temporal interlace shift.

        Args:
            input (torch.Tensor): Feature map with shape
                [N, num_segments, C, H * W].
            shift (torch.Tensor): Shift tensor with shape [N, num_segments].

        Returns:
            Feature map after temporal interlace shift.
        """
        # Thin module wrapper around the functional ``tin_shift`` op.
        return tin_shift(input, shift)
|
class _Voxelization(Function):
    """Autograd function converting raw point clouds into voxels."""

    @staticmethod
    def forward(ctx, points, voxel_size, coors_range, max_points=35,
                max_voxels=20000, deterministic=True):
        """Convert kitti points(N, >=3) to voxels.

        Args:
            points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz
                points and points[:, 3:] contain other information like
                reflectivity.
            voxel_size (tuple or float): The size of voxel with the shape
                of [3].
            coors_range (tuple or float): The coordinate range of voxel with
                the shape of [6].
            max_points (int, optional): maximum points contained in a voxel.
                if max_points=-1, it means using dynamic_voxelize.
                Default: 35.
            max_voxels (int, optional): maximum voxels this function
                creates. for second, 20000 is a good choice. Users should
                shuffle points before calling this function because
                max_voxels may drop points. Default: 20000.
            deterministic (bool): whether to invoke the non-deterministic
                version of hard-voxelization implementations.
                non-deterministic version is considerably faster but is not
                deterministic. only affects hard voxelization. default True.
                see https://github.com/open-mmlab/mmdetection3d/issues/894
                and https://github.com/open-mmlab/mmdetection3d/pull/904
                for details.

        Returns:
            tuple[torch.Tensor]: A tuple containing the output voxels with
            the shape of [M, max_points, n_dim] (only when
            max_points != -1), the voxel coordinates with shape [M, 3] and
            the number of points per voxel with shape [M] (only when
            max_points != -1).
        """
        voxel_size_t = torch.tensor(voxel_size, dtype=torch.float)
        coors_range_t = torch.tensor(coors_range, dtype=torch.float)
        if max_points == -1 or max_voxels == -1:
            # Dynamic voxelization: only voxel coordinates are produced.
            coors = points.new_zeros(size=(points.size(0), 3),
                                     dtype=torch.int)
            ext_module.dynamic_voxelize_forward(points, voxel_size_t,
                                                coors_range_t, coors, NDim=3)
            return coors
        # Hard voxelization: pre-allocate the maximum-sized buffers and let
        # the kernel report how many voxels were actually filled.
        voxels = points.new_zeros(
            size=(max_voxels, max_points, points.size(1)))
        coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int)
        num_points_per_voxel = points.new_zeros(size=(max_voxels,),
                                                dtype=torch.int)
        voxel_num = torch.zeros(size=(), dtype=torch.long)
        ext_module.hard_voxelize_forward(
            points, voxel_size_t, coors_range_t, voxels, coors,
            num_points_per_voxel, voxel_num, max_points=max_points,
            max_voxels=max_voxels, NDim=3, deterministic=deterministic)
        # Trim the buffers down to the voxels actually produced.
        return (voxels[:voxel_num], coors[:voxel_num],
                num_points_per_voxel[:voxel_num])
|
class Voxelization(nn.Module):
    """Convert kitti points(N, >=3) to voxels.

    Please refer to `Point-Voxel CNN for Efficient 3D Deep Learning
    <https://arxiv.org/abs/1907.03739>`_ for more details.

    Args:
        voxel_size (tuple or float): The size of voxel with the shape of [3].
        point_cloud_range (tuple or float): The coordinate range of voxel
            with the shape of [6].
        max_num_points (int): maximum points contained in a voxel. if
            max_points=-1, it means using dynamic_voxelize.
        max_voxels (int, optional): maximum voxels this function creates.
            for second, 20000 is a good choice. Users should shuffle points
            before calling this function because max_voxels may drop points.
            Default: 20000.
        deterministic (bool): whether to invoke the non-deterministic version
            of hard-voxelization implementations. only affects hard
            voxelization. default True.
    """

    def __init__(self, voxel_size, point_cloud_range, max_num_points,
                 max_voxels=20000, deterministic=True):
        """
        Args:
            voxel_size (list): list [x, y, z] size of three dimension
            point_cloud_range (list):
                [x_min, y_min, z_min, x_max, y_max, z_max]
            max_num_points (int): max number of points per voxel
            max_voxels (tuple or int): max number of voxels in
                (training, testing) time
            deterministic: bool. whether to invoke the non-deterministic
                version of hard-voxelization implementations. see
                https://github.com/open-mmlab/mmdetection3d/issues/894 and
                https://github.com/open-mmlab/mmdetection3d/pull/904 for
                details.
        """
        super().__init__()
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.max_num_points = max_num_points
        # Normalize to a (train, test) pair.
        self.max_voxels = (max_voxels if isinstance(max_voxels, tuple)
                           else _pair(max_voxels))
        self.deterministic = deterministic
        range_t = torch.tensor(point_cloud_range, dtype=torch.float32)
        size_t = torch.tensor(voxel_size, dtype=torch.float32)
        # Number of voxels along each axis, rounded to the nearest integer.
        grid_size = torch.round((range_t[3:] - range_t[:3]) / size_t).long()
        self.grid_size = grid_size
        # [1, H, W]-style shape derived from the first two grid dims.
        self.pcd_shape = [*grid_size[:2], 1][::-1]

    def forward(self, input):
        """Voxelize ``input`` points using the mode-appropriate voxel cap."""
        max_voxels = self.max_voxels[0] if self.training else self.max_voxels[1]
        return voxelization(input, self.voxel_size, self.point_cloud_range,
                            self.max_num_points, max_voxels,
                            self.deterministic)

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'voxel_size={self.voxel_size}'
                f', point_cloud_range={self.point_cloud_range}'
                f', max_num_points={self.max_num_points}'
                f', max_voxels={self.max_voxels}'
                f', deterministic={self.deterministic})')
|
def scatter(input, devices, streams=None):
    """Scatters tensor across multiple GPUs."""
    if streams is None:
        streams = [None] * len(devices)
    if isinstance(input, list):
        # Split the list into contiguous chunks, one chunk per device.
        chunk_size = (len(input) - 1) // len(devices) + 1
        return [
            scatter(item, [devices[i // chunk_size]],
                    [streams[i // chunk_size]])
            for i, item in enumerate(input)
        ]
    if isinstance(input, torch.Tensor):
        output = input.contiguous()
        # A copy stream only helps for non-empty tensors.
        stream = streams[0] if output.numel() > 0 else None
        if devices != [-1]:
            # -1 means "stay on CPU"; otherwise copy onto the first device.
            with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
                output = output.cuda(devices[0], non_blocking=True)
        return output
    raise Exception(f'Unknown type {type(input)}.')
|
def synchronize_stream(output, devices, streams):
    """Make each device's main stream wait on its scatter copy stream."""
    if isinstance(output, list):
        # Outputs are chunked per device; recurse into each chunk element.
        chunk_size = len(output) // len(devices)
        for dev_idx in range(len(devices)):
            for offset in range(chunk_size):
                synchronize_stream(output[dev_idx * chunk_size + offset],
                                   [devices[dev_idx]], [streams[dev_idx]])
    elif isinstance(output, torch.Tensor):
        if output.numel() != 0:
            with torch.cuda.device(devices[0]):
                main_stream = torch.cuda.current_stream()
                main_stream.wait_stream(streams[0])
                # Keep the tensor alive until the main stream is done.
                output.record_stream(main_stream)
    else:
        raise Exception(f'Unknown type {type(output)}.')
|
def get_input_device(input):
    """Return the CUDA device index of ``input``, or -1 for CPU data."""
    if isinstance(input, list):
        # First GPU-resident element wins; an all-CPU (or empty) list is -1.
        for item in input:
            device = get_input_device(item)
            if device != -1:
                return device
        return -1
    if isinstance(input, torch.Tensor):
        return input.get_device() if input.is_cuda else -1
    raise Exception(f'Unknown type {type(input)}.')
|
class Scatter:
    """Scatter helper that handles both CPU (-1) and GPU targets."""

    @staticmethod
    def forward(target_gpus, input):
        """Scatter ``input`` to ``target_gpus`` and sync the copy streams."""
        input_device = get_input_device(input)
        streams = None
        # Dedicated copy streams are only needed for CPU -> GPU transfers.
        if input_device == -1 and target_gpus != [-1]:
            streams = [_get_stream(device) for device in target_gpus]
        outputs = scatter(input, target_gpus, streams)
        if streams is not None:
            # The default streams must wait for the async copies to finish.
            synchronize_stream(outputs, target_gpus, streams)
        return tuple(outputs) if isinstance(outputs, list) else (outputs,)
|
def collate(batch, samples_per_gpu=1):
    """Puts each data field into a tensor/DataContainer with outer dimension
    batch size.

    Extend default_collate to add support for
    :type:`~mmcv.parallel.DataContainer`. There are 3 cases.

    1. cpu_only = True, e.g., meta data
    2. cpu_only = False, stack = True, e.g., images tensors
    3. cpu_only = False, stack = False, e.g., gt bboxes
    """
    if (not isinstance(batch, Sequence)):
        raise TypeError(f'{batch.dtype} is not supported.')
    if isinstance(batch[0], DataContainer):
        stacked = []
        if batch[0].cpu_only:
            # Case 1: keep the raw ``.data`` on CPU, grouped per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
            return DataContainer(stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
        elif batch[0].stack:
            # Case 2: pad the samples of each GPU group to a common shape,
            # then stack each group into a single tensor.
            for i in range(0, len(batch), samples_per_gpu):
                assert isinstance(batch[i].data, torch.Tensor)
                if (batch[i].pad_dims is not None):
                    ndim = batch[i].dim()
                    # Only the trailing ``pad_dims`` dimensions may be padded.
                    assert (ndim > batch[i].pad_dims)
                    max_shape = [0 for _ in range(batch[i].pad_dims)]
                    # Seed max_shape with the first sample's trailing dims.
                    for dim in range(1, (batch[i].pad_dims + 1)):
                        max_shape[(dim - 1)] = batch[i].size((- dim))
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # Leading (non-padded) dims must match exactly.
                        for dim in range(0, (ndim - batch[i].pad_dims)):
                            assert (batch[i].size(dim) == sample.size(dim))
                        # Track the group-wide maximum of each padded dim.
                        for dim in range(1, (batch[i].pad_dims + 1)):
                            max_shape[(dim - 1)] = max(max_shape[(dim - 1)], sample.size((- dim)))
                    padded_samples = []
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # F.pad takes (left, right) pairs starting from the
                        # last dim; only the "right" entries are non-zero.
                        pad = [0 for _ in range((batch[i].pad_dims * 2))]
                        for dim in range(1, (batch[i].pad_dims + 1)):
                            pad[((2 * dim) - 1)] = (max_shape[(dim - 1)] - sample.size((- dim)))
                        padded_samples.append(F.pad(sample.data, pad, value=sample.padding_value))
                    stacked.append(default_collate(padded_samples))
                elif (batch[i].pad_dims is None):
                    # No padding requested: samples must already share a shape.
                    stacked.append(default_collate([sample.data for sample in batch[i:(i + samples_per_gpu)]]))
                else:
                    raise ValueError('pad_dims should be either None or integers (1-3)')
        else:
            # Case 3: no stacking; pass the raw ``.data`` grouped per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
    elif isinstance(batch[0], Sequence):
        # Recursively collate each position of the sequence elements.
        transposed = zip(*batch)
        return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], Mapping):
        # Recursively collate each key of the mapping elements.
        return {key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]}
    else:
        # Plain tensors/numbers/strings: defer to PyTorch's default collate.
        return default_collate(batch)
|
def assert_tensor_type(func):
    """Decorator that restricts a method to tensor-holding containers.

    The wrapped method may only run when ``args[0].data`` is a
    ``torch.Tensor``; otherwise an ``AttributeError`` is raised.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        holder = args[0]
        if not isinstance(holder.data, torch.Tensor):
            raise AttributeError(
                f'{holder.__class__.__name__} has no attribute '
                f'{func.__name__} for type {holder.datatype}')
        return func(*args, **kwargs)
    return wrapper
|
class DataContainer:
    """A container for any type of objects.

    Typically tensors will be stacked in the collate function and sliced
    along some dimension in the scatter function. This behavior has some
    limitations:

    1. All tensors have to be the same size.
    2. Types are limited (numpy array or Tensor).

    ``DataContainer`` and ``MMDataParallel`` are designed to overcome these
    limitations. The behavior can be either of the following:

    - copy to GPU, pad all tensors to the same size and stack them
    - copy to GPU without stacking
    - leave the objects as is and pass it to the model
    - pad_dims specifies the number of last few dimensions to do padding
    """

    def __init__(self, data, stack=False, padding_value=0, cpu_only=False,
                 pad_dims=2):
        assert pad_dims in [None, 1, 2, 3]
        self._data = data
        self._cpu_only = cpu_only
        self._stack = stack
        self._padding_value = padding_value
        self._pad_dims = pad_dims

    def __repr__(self):
        return f'{self.__class__.__name__}({self.data!r})'

    def __len__(self):
        return len(self._data)

    @property
    def data(self):
        """The wrapped object."""
        return self._data

    @property
    def datatype(self):
        """Torch type string for tensors, the Python type otherwise."""
        if isinstance(self.data, torch.Tensor):
            return self.data.type()
        return type(self.data)

    @property
    def cpu_only(self):
        """Whether the data must stay on CPU (e.g. meta info)."""
        return self._cpu_only

    @property
    def stack(self):
        """Whether samples should be stacked into one tensor when collated."""
        return self._stack

    @property
    def padding_value(self):
        """Fill value used when padding tensors to a common shape."""
        return self._padding_value

    @property
    def pad_dims(self):
        """Number of trailing dimensions eligible for padding."""
        return self._pad_dims

    @assert_tensor_type
    def size(self, *args, **kwargs):
        """Proxy of ``torch.Tensor.size``; only valid for tensor data."""
        return self.data.size(*args, **kwargs)

    @assert_tensor_type
    def dim(self):
        """Proxy of ``torch.Tensor.dim``; only valid for tensor data."""
        return self.data.dim()
|
class MMDataParallel(DataParallel):
    """The DataParallel module that supports DataContainer.

    MMDataParallel has two main differences with PyTorch DataParallel:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data during both GPU and CPU inference.
    - It implement two more APIs ``train_step()`` and ``val_step()``.

    .. warning::
        MMDataParallel only supports single GPU training, if you need to
        train with multiple GPUs, please use MMDistributedDataParallel
        instead. If you have multiple GPUs and you just want to use
        MMDataParallel, you can set the environment variable
        ``CUDA_VISIBLE_DEVICES=0`` or instantiate ``MMDataParallel`` with
        ``device_ids=[0]``.

    Args:
        module (:class:`nn.Module`): Module to be encapsulated.
        device_ids (list[int]): Device IDS of modules to be scattered to.
            Defaults to None when GPU is not available.
        output_device (str | int): Device ID for output. Defaults to None.
        dim (int): Dimension used to scatter the data. Defaults to 0.
    """

    def __init__(self, *args, dim=0, **kwargs):
        super().__init__(*args, dim=dim, **kwargs)
        self.dim = dim

    def forward(self, *inputs, **kwargs):
        """Override the original forward function.

        The main difference lies in the CPU inference where the data in
        :class:`DataContainers` will still be gathered.
        """
        if not self.device_ids:
            # CPU inference: scatter to "device -1" so DataContainers are
            # still unwrapped, then call the wrapped module directly.
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return self.module(*inputs[0], **kwargs[0])
        return super().forward(*inputs, **kwargs)

    def scatter(self, inputs, kwargs, device_ids):
        # DataContainer-aware replacement for ``DataParallel.scatter``.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def train_step(self, *inputs, **kwargs):
        """Run ``module.train_step`` with DataContainer-aware scattering."""
        return self._run_step('train_step', inputs, kwargs)

    def val_step(self, *inputs, **kwargs):
        """Run ``module.val_step`` with DataContainer-aware scattering."""
        return self._run_step('val_step', inputs, kwargs)

    def _run_step(self, step_name, inputs, kwargs):
        """Shared implementation of ``train_step`` and ``val_step``.

        Handles the CPU fallback, enforces the single-GPU constraint,
        validates device placement, scatters the inputs, and delegates to
        the wrapped module's ``step_name`` method.
        """
        if not self.device_ids:
            # CPU run: unwrap DataContainers and call the module directly.
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return getattr(self.module, step_name)(*inputs[0], **kwargs[0])
        assert len(self.device_ids) == 1, \
            ('MMDataParallel only supports single GPU training, if you need '
             'to train with multiple GPUs, please use '
             'MMDistributedDataParallel instead.')
        # All parameters/buffers must live on the root device before running.
        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError(
                    'module must have its parameters and buffers on device '
                    f'{self.src_device_obj} (device_ids[0]) but found one of '
                    f'them on device: {t.device}')
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        return getattr(self.module, step_name)(*inputs[0], **kwargs[0])
|
class MMDistributedDataParallel(DistributedDataParallel):
    """The DDP module that supports DataContainer.

    MMDDP has two main differences with PyTorch DDP:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data.
    - It implement two APIs ``train_step()`` and ``val_step()``.
    """

    def to_kwargs(self, inputs, kwargs, device_id):
        # DataContainer-aware counterpart of DDP's ``to_kwargs``.
        return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)

    def scatter(self, inputs, kwargs, device_ids):
        # DataContainer-aware counterpart of DDP's ``scatter``.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def train_step(self, *inputs, **kwargs):
        """train_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.train_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # On PyTorch >= 1.7, the reducer's buckets must be rebuilt in the
        # forward path, mirroring ``DistributedDataParallel.forward``.
        if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
            print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()
        if self.device_ids:
            (inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
            if (len(self.device_ids) == 1):
                output = self.module.train_step(*inputs[0], **kwargs[0])
            else:
                # Multi-device path: run replicas in parallel, then gather.
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # CPU path: no scattering across devices.
            output = self.module.train_step(*inputs, **kwargs)
        # Register backward hooks for gradient reduction, matching
        # ``DistributedDataParallel.forward``.
        if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', True)):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
            self.require_forward_param_sync = False
        return output

    def val_step(self, *inputs, **kwargs):
        """val_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.val_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # Same bucket-rebuild requirement as in ``train_step`` (PyTorch>=1.7).
        if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
            print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()
        if self.device_ids:
            (inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
            if (len(self.device_ids) == 1):
                output = self.module.val_step(*inputs[0], **kwargs[0])
            else:
                # Multi-device path: run replicas in parallel, then gather.
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # CPU path: no scattering across devices.
            output = self.module.val_step(*inputs, **kwargs)
        # Register backward hooks for gradient reduction, matching
        # ``DistributedDataParallel.forward``.
        if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', True)):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
            self.require_forward_param_sync = False
        return output
|
@MODULE_WRAPPERS.register_module()
class MMDistributedDataParallel(nn.Module):
    """Deprecated single-module DDP wrapper.

    Broadcasts parameters and buffers from rank 0 at construction time and
    scatters inputs to the current CUDA device before each call.
    """

    def __init__(self, module, dim=0, broadcast_buffers=True,
                 bucket_cap_mb=25):
        super().__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers
        # Convert the bucket cap from MB to bytes.
        self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        # Broadcast from rank 0 in flattened buckets of at most
        # ``buffer_size`` bytes, then copy the synced values back in place.
        for bucket in _take_tensors(tensors, buffer_size):
            flat = _flatten_dense_tensors(bucket)
            dist.broadcast(flat, 0)
            for tensor, synced in zip(
                    bucket, _unflatten_dense_tensors(flat, bucket)):
                tensor.copy_(synced)

    def _sync_params(self):
        """Broadcast all state-dict values (and optionally buffers)."""
        module_states = list(self.module.state_dict().values())
        if len(module_states) > 0:
            self._dist_broadcast_coalesced(module_states,
                                           self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # ``_all_buffers`` only exists on very old torch versions.
            if (TORCH_VERSION != 'parrots'
                    and digit_version(TORCH_VERSION) < digit_version('1.0')):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if len(buffers) > 0:
                self._dist_broadcast_coalesced(buffers,
                                               self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])

    def train_step(self, *inputs, **kwargs):
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module.train_step(*inputs[0], **kwargs[0])

    def val_step(self, *inputs, **kwargs):
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module.val_step(*inputs[0], **kwargs[0])
|
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if (target_gpus != [(- 1)]):
                # GPU targets: use the autograd-aware scatter op.
                return OrigScatter.apply(target_gpus, None, dim, obj)
            else:
                # CPU-only "scatter" (target gpu is -1).
                return Scatter.forward(target_gpus, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                # CPU-only containers (e.g. meta data) pass through as-is.
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        if (isinstance(obj, tuple) and (len(obj) > 0)):
            # Scatter each element, then regroup one tuple per target GPU.
            return list(zip(*map(scatter_map, obj)))
        if (isinstance(obj, list) and (len(obj) > 0)):
            # Same as tuples, but each per-GPU group stays a list.
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if (isinstance(obj, dict) and (len(obj) > 0)):
            # Rebuild a dict of the same type for every target GPU.
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        # Non-scatterable objects are replicated once per target GPU.
        return [obj for targets in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # NOTE(review): mirrors torch.nn.parallel.scatter_gather — presumably
        # drops the closure reference so the recursive function does not keep
        # a reference cycle alive after the call; confirm against upstream.
        scatter_map = None
|
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter with support for kwargs dictionary."""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter side so both lists cover the same number of targets.
    size_diff = len(scattered_kwargs) - len(scattered_inputs)
    if size_diff > 0:
        scattered_inputs.extend(() for _ in range(size_diff))
    elif size_diff < 0:
        scattered_kwargs.extend({} for _ in range(-size_diff))
    return tuple(scattered_inputs), tuple(scattered_kwargs)
|
def is_module_wrapper(module):
    """Check if a module is a module wrapper.

    The following 3 modules in MMCV (and their subclasses) are regarded as
    module wrappers: DataParallel, DistributedDataParallel,
    MMDistributedDataParallel (the deprecated version). You may add your own
    module wrapper by registering it to ``mmcv.parallel.MODULE_WRAPPERS``.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: True if the input module is a module wrapper.
    """
    wrapper_types = tuple(MODULE_WRAPPERS.module_dict.values())
    return isinstance(module, wrapper_types)
|
class BaseModule(nn.Module, metaclass=ABCMeta):
'Base module for all modules in openmmlab.\n\n ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional\n functionality of parameter initialization. Compared with\n ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.\n\n - ``init_cfg``: the config to control the initialization.\n - ``init_weights``: The function of parameter initialization and recording\n initialization information.\n - ``_params_init_info``: Used to track the parameter initialization\n information. This attribute only exists during executing the\n ``init_weights``.\n\n Args:\n init_cfg (dict, optional): Initialization config dict.\n '
def __init__(self, init_cfg=None):
'Initialize BaseModule, inherited from `torch.nn.Module`'
super(BaseModule, self).__init__()
self._is_init = False
self.init_cfg = copy.deepcopy(init_cfg)
@property
def is_init(self):
return self._is_init
def init_weights(self):
'Initialize the weights.'
is_top_level_module = False
if (not hasattr(self, '_params_init_info')):
self._params_init_info = defaultdict(dict)
is_top_level_module = True
for (name, param) in self.named_parameters():
self._params_init_info[param]['init_info'] = f'The value is the same before and after calling `init_weights` of {self.__class__.__name__} '
self._params_init_info[param]['tmp_mean_value'] = param.data.mean()
for sub_module in self.modules():
sub_module._params_init_info = self._params_init_info
logger_names = list(logger_initialized.keys())
logger_name = (logger_names[0] if logger_names else 'mmcv')
from ..cnn import initialize
from ..cnn.utils.weight_init import update_init_info
module_name = self.__class__.__name__
if (not self._is_init):
if self.init_cfg:
print_log(f'initialize {module_name} with init_cfg {self.init_cfg}', logger=logger_name)
initialize(self, self.init_cfg)
if isinstance(self.init_cfg, dict):
if (self.init_cfg['type'] == 'Pretrained'):
return
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights()
update_init_info(m, init_info=f'Initialized by user-defined `init_weights` in {m.__class__.__name__} ')
self._is_init = True
else:
warnings.warn(f'init_weights of {self.__class__.__name__} has been called more than once.')
if is_top_level_module:
self._dump_init_info(logger_name)
for sub_module in self.modules():
del sub_module._params_init_info
@master_only
def _dump_init_info(self, logger_name):
'Dump the initialization information to a file named\n `initialization.log.json` in workdir.\n\n Args:\n logger_name (str): The name of logger.\n '
logger = get_logger(logger_name)
with_file_handler = False
for handler in logger.handlers:
if isinstance(handler, FileHandler):
handler.stream.write('Name of parameter - Initialization information\n')
for (name, param) in self.named_parameters():
handler.stream.write(f'''
{name} - {param.shape}:
{self._params_init_info[param]['init_info']}
''')
handler.stream.flush()
with_file_handler = True
if (not with_file_handler):
for (name, param) in self.named_parameters():
print_log(f'''
{name} - {param.shape}:
{self._params_init_info[param]['init_info']}
''', logger=logger_name)
def __repr__(self):
    """Extend the default module repr with the ``init_cfg``, if any."""
    rep = super().__repr__()
    if self.init_cfg:
        rep += f'\ninit_cfg={self.init_cfg}'
    return rep
|
class Sequential(BaseModule, nn.Sequential):
    """Sequential module in openmmlab.

    A drop-in replacement for :class:`torch.nn.Sequential` that also carries
    the weight-initialization machinery of :class:`BaseModule`.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, *args, init_cfg=None):
        # Initialize BaseModule first so init_cfg bookkeeping exists before
        # nn.Sequential registers the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.Sequential.__init__(self, *args)
|
class ModuleList(BaseModule, nn.ModuleList):
    """ModuleList in openmmlab.

    A drop-in replacement for :class:`torch.nn.ModuleList` that also carries
    the weight-initialization machinery of :class:`BaseModule`.

    Args:
        modules (iterable, optional): an iterable of modules to add.
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        # Initialize BaseModule first so init_cfg bookkeeping exists before
        # nn.ModuleList registers the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.ModuleList.__init__(self, modules)
|
class ModuleDict(BaseModule, nn.ModuleDict):
    """ModuleDict in openmmlab.

    A drop-in replacement for :class:`torch.nn.ModuleDict` that also carries
    the weight-initialization machinery of :class:`BaseModule`.

    Args:
        modules (dict, optional): a mapping (dictionary) of (string: module)
            or an iterable of key-value pairs of type (string, module).
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        # Initialize BaseModule first so init_cfg bookkeeping exists before
        # nn.ModuleDict registers the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.ModuleDict.__init__(self, modules)
|
class BaseRunner(metaclass=ABCMeta):
    'The base class of Runner, a training helper for PyTorch.\n\n All subclasses should implement the following APIs:\n\n - ``run()``\n - ``train()``\n - ``val()``\n - ``save_checkpoint()``\n\n Args:\n model (:obj:`torch.nn.Module`): The model to be run.\n batch_processor (callable): A callable method that process a data\n batch. The interface of this method should be\n `batch_processor(model, data, train_mode) -> dict`\n optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an\n optimizer (in most cases) or a dict of optimizers (in models that\n requires more than one optimizer, e.g., GAN).\n work_dir (str, optional): The working directory to save checkpoints\n and logs. Defaults to None.\n logger (:obj:`logging.Logger`): Logger used during training.\n Defaults to None. (The default value is just for backward\n compatibility)\n meta (dict | None): A dict records some import information such as\n environment info and seed, which will be logged in logger hook.\n Defaults to None.\n max_epochs (int, optional): Total training epochs.\n max_iters (int, optional): Total training iterations.\n '

    def __init__(self, model, batch_processor=None, optimizer=None, work_dir=None, logger=None, meta=None, max_iters=None, max_epochs=None):
        # --- validate batch_processor (deprecated path) vs model.train_step ---
        if (batch_processor is not None):
            if (not callable(batch_processor)):
                raise TypeError(f'batch_processor must be callable, but got {type(batch_processor)}')
            warnings.warn('batch_processor is deprecated, please implement train_step() and val_step() in the model instead.', DeprecationWarning)
            # Unwrap DataParallel/DistributedDataParallel-style wrappers to
            # inspect the real model.
            if is_module_wrapper(model):
                _model = model.module
            else:
                _model = model
            if (hasattr(_model, 'train_step') or hasattr(_model, 'val_step')):
                raise RuntimeError('batch_processor and model.train_step()/model.val_step() cannot be both available.')
        else:
            assert hasattr(model, 'train_step')
        # --- validate optimizer: a single Optimizer, a dict of them, or None ---
        if isinstance(optimizer, dict):
            for (name, optim) in optimizer.items():
                if (not isinstance(optim, Optimizer)):
                    raise TypeError(f'optimizer must be a dict of torch.optim.Optimizers, but optimizer["{name}"] is a {type(optim)}')
        elif ((not isinstance(optimizer, Optimizer)) and (optimizer is not None)):
            raise TypeError(f'optimizer must be a torch.optim.Optimizer object or dict or None, but got {type(optimizer)}')
        if (not isinstance(logger, logging.Logger)):
            raise TypeError(f'logger must be a logging.Logger object, but got {type(logger)}')
        if ((meta is not None) and (not isinstance(meta, dict))):
            raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
        self.model = model
        self.batch_processor = batch_processor
        self.optimizer = optimizer
        self.logger = logger
        self.meta = meta
        # Create the working directory eagerly so hooks can write into it.
        if mmcv.is_str(work_dir):
            self.work_dir = osp.abspath(work_dir)
            mmcv.mkdir_or_exist(self.work_dir)
        elif (work_dir is None):
            self.work_dir = None
        else:
            raise TypeError('"work_dir" must be a str or None')
        # Use the wrapped module's class name when the model is wrapped.
        if hasattr(self.model, 'module'):
            self._model_name = self.model.module.__class__.__name__
        else:
            self._model_name = self.model.__class__.__name__
        (self._rank, self._world_size) = get_dist_info()
        self.timestamp = get_time_str()
        self.mode = None
        self._hooks = []
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0
        # Exactly one of max_epochs / max_iters may be given; the other is
        # derived by the concrete runner.
        if ((max_epochs is not None) and (max_iters is not None)):
            raise ValueError('Only one of `max_epochs` or `max_iters` can be set.')
        self._max_epochs = max_epochs
        self._max_iters = max_iters
        self.log_buffer = LogBuffer()

    @property
    def model_name(self):
        'str: Name of the model, usually the module class name.'
        return self._model_name

    @property
    def rank(self):
        'int: Rank of current process. (distributed training)'
        return self._rank

    @property
    def world_size(self):
        'int: Number of processes participating in the job.\n (distributed training)'
        return self._world_size

    @property
    def hooks(self):
        'list[:obj:`Hook`]: A list of registered hooks.'
        return self._hooks

    @property
    def epoch(self):
        'int: Current epoch.'
        return self._epoch

    @property
    def iter(self):
        'int: Current iteration.'
        return self._iter

    @property
    def inner_iter(self):
        'int: Iteration in an epoch.'
        return self._inner_iter

    @property
    def max_epochs(self):
        'int: Maximum training epochs.'
        return self._max_epochs

    @property
    def max_iters(self):
        'int: Maximum training iterations.'
        return self._max_iters

    @abstractmethod
    def train(self):
        # Run one training epoch/phase; implemented by concrete runners.
        pass

    @abstractmethod
    def val(self):
        # Run one validation phase; implemented by concrete runners.
        pass

    @abstractmethod
    def run(self, data_loaders, workflow, **kwargs):
        # Main entry: iterate the workflow over the data loaders.
        pass

    @abstractmethod
    def save_checkpoint(self, out_dir, filename_tmpl, save_optimizer=True, meta=None, create_symlink=True):
        # Persist model (and optionally optimizer) state to disk.
        pass

    def current_lr(self):
        'Get current learning rates.\n\n Returns:\n list[float] | dict[str, list[float]]: Current learning rates of all\n param groups. If the runner has a dict of optimizers, this method\n will return a dict.\n '
        if isinstance(self.optimizer, torch.optim.Optimizer):
            lr = [group['lr'] for group in self.optimizer.param_groups]
        elif isinstance(self.optimizer, dict):
            lr = dict()
            for (name, optim) in self.optimizer.items():
                lr[name] = [group['lr'] for group in optim.param_groups]
        else:
            raise RuntimeError('lr is not applicable because optimizer does not exist.')
        return lr

    def current_momentum(self):
        'Get current momentums.\n\n Returns:\n list[float] | dict[str, list[float]]: Current momentums of all\n param groups. If the runner has a dict of optimizers, this method\n will return a dict.\n '

        def _get_momentum(optimizer):
            # SGD-style optimizers expose 'momentum'; Adam-style expose
            # 'betas' (beta1 plays the momentum role); otherwise report 0.
            momentums = []
            for group in optimizer.param_groups:
                if ('momentum' in group.keys()):
                    momentums.append(group['momentum'])
                elif ('betas' in group.keys()):
                    momentums.append(group['betas'][0])
                else:
                    momentums.append(0)
            return momentums
        if (self.optimizer is None):
            raise RuntimeError('momentum is not applicable because optimizer does not exist.')
        elif isinstance(self.optimizer, torch.optim.Optimizer):
            momentums = _get_momentum(self.optimizer)
        elif isinstance(self.optimizer, dict):
            momentums = dict()
            for (name, optim) in self.optimizer.items():
                momentums[name] = _get_momentum(optim)
        return momentums

    def register_hook(self, hook, priority='NORMAL'):
        'Register a hook into the hook list.\n\n The hook will be inserted into a priority queue, with the specified\n priority (See :class:`Priority` for details of priorities).\n For hooks with the same priority, they will be triggered in the same\n order as they are registered.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n '
        assert isinstance(hook, Hook)
        if hasattr(hook, 'priority'):
            raise ValueError('"priority" is a reserved attribute for hooks')
        priority = get_priority(priority)
        hook.priority = priority
        # Insert from the back so hooks with equal priority keep their
        # registration order (stable insertion).
        inserted = False
        for i in range((len(self._hooks) - 1), (- 1), (- 1)):
            if (priority >= self._hooks[i].priority):
                self._hooks.insert((i + 1), hook)
                inserted = True
                break
        if (not inserted):
            self._hooks.insert(0, hook)

    def register_hook_from_cfg(self, hook_cfg):
        "Register a hook from its cfg.\n\n Args:\n hook_cfg (dict): Hook config. It should have at least keys 'type'\n and 'priority' indicating its type and priority.\n\n Note:\n The specific hook class to register should not use 'type' and\n 'priority' arguments during initialization.\n "
        # Copy first so the caller's dict is not mutated by pop().
        hook_cfg = hook_cfg.copy()
        priority = hook_cfg.pop('priority', 'NORMAL')
        hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
        self.register_hook(hook, priority=priority)

    def call_hook(self, fn_name):
        'Call all hooks.\n\n Args:\n fn_name (str): The function name in each hook to be called, such as\n "before_train_epoch".\n '
        for hook in self._hooks:
            getattr(hook, fn_name)(self)

    def get_hook_info(self):
        # Build a human-readable summary of hooks grouped by trigger stage.
        stage_hook_map = {stage: [] for stage in Hook.stages}
        for hook in self.hooks:
            try:
                priority = Priority(hook.priority).name
            except ValueError:
                # Numeric priority with no named Priority member; show as-is.
                priority = hook.priority
            classname = hook.__class__.__name__
            hook_info = f'({priority:<12}) {classname:<35}'
            for trigger_stage in hook.get_triggered_stages():
                stage_hook_map[trigger_stage].append(hook_info)
        stage_hook_infos = []
        for stage in Hook.stages:
            hook_infos = stage_hook_map[stage]
            if (len(hook_infos) > 0):
                info = f'''{stage}:
'''
                info += '\n'.join(hook_infos)
                info += '\n -------------------- '
                stage_hook_infos.append(info)
        return '\n'.join(stage_hook_infos)

    def load_checkpoint(self, filename, map_location='cpu', strict=False, revise_keys=[('^module.', '')]):
        # Delegate to the module-level load_checkpoint helper.
        return load_checkpoint(self.model, filename, map_location, strict, self.logger, revise_keys=revise_keys)

    def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
        # 'default' maps storages onto the current CUDA device when available.
        if (map_location == 'default'):
            if torch.cuda.is_available():
                device_id = torch.cuda.current_device()
                checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
            else:
                checkpoint = self.load_checkpoint(checkpoint)
        else:
            checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        if (self.meta is None):
            self.meta = {}
        self.meta.setdefault('hook_msgs', {})
        self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))
        if ('config' in checkpoint['meta']):
            config = mmcv.Config.fromstring(checkpoint['meta']['config'], file_format='.py')
            previous_gpu_ids = config.get('gpu_ids', None)
            # If the GPU count changed between runs, rescale the iteration
            # counter so the effective number of seen samples stays aligned.
            if (previous_gpu_ids and (len(previous_gpu_ids) > 0) and (len(previous_gpu_ids) != self.world_size)):
                self._iter = int(((self._iter * len(previous_gpu_ids)) / self.world_size))
                self.logger.info('the iteration number is changed due to change of GPU number')
        # NOTE(review): this overwrites the meta merged above with the
        # checkpoint's meta — verify this is the intended precedence.
        self.meta = checkpoint['meta']
        if (('optimizer' in checkpoint) and resume_optimizer):
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(checkpoint['optimizer'][k])
            else:
                raise TypeError(f'Optimizer should be dict or torch.optim.Optimizer but got {type(self.optimizer)}')
        self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)

    def register_lr_hook(self, lr_config):
        # Accepts None (no-op), a config dict (built via HOOKS registry,
        # 'policy' mapped to '<Policy>LrUpdaterHook'), or a ready hook object.
        if (lr_config is None):
            return
        elif isinstance(lr_config, dict):
            assert ('policy' in lr_config)
            policy_type = lr_config.pop('policy')
            # e.g. 'cyclic' -> 'Cyclic'; already-CamelCase names are kept.
            if (policy_type == policy_type.lower()):
                policy_type = policy_type.title()
            hook_type = (policy_type + 'LrUpdaterHook')
            lr_config['type'] = hook_type
            hook = mmcv.build_from_cfg(lr_config, HOOKS)
        else:
            hook = lr_config
        self.register_hook(hook, priority='VERY_HIGH')

    def register_momentum_hook(self, momentum_config):
        # Same convention as register_lr_hook, targeting MomentumUpdaterHook.
        if (momentum_config is None):
            return
        if isinstance(momentum_config, dict):
            assert ('policy' in momentum_config)
            policy_type = momentum_config.pop('policy')
            if (policy_type == policy_type.lower()):
                policy_type = policy_type.title()
            hook_type = (policy_type + 'MomentumUpdaterHook')
            momentum_config['type'] = hook_type
            hook = mmcv.build_from_cfg(momentum_config, HOOKS)
        else:
            hook = momentum_config
        self.register_hook(hook, priority='HIGH')

    def register_optimizer_hook(self, optimizer_config):
        if (optimizer_config is None):
            return
        if isinstance(optimizer_config, dict):
            optimizer_config.setdefault('type', 'OptimizerHook')
            hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
        else:
            hook = optimizer_config
        self.register_hook(hook, priority='ABOVE_NORMAL')

    def register_checkpoint_hook(self, checkpoint_config):
        if (checkpoint_config is None):
            return
        if isinstance(checkpoint_config, dict):
            checkpoint_config.setdefault('type', 'CheckpointHook')
            hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
        else:
            hook = checkpoint_config
        self.register_hook(hook, priority='NORMAL')

    def register_logger_hooks(self, log_config):
        if (log_config is None):
            return
        # The shared 'interval' is passed to every configured logger hook.
        log_interval = log_config['interval']
        for info in log_config['hooks']:
            logger_hook = mmcv.build_from_cfg(info, HOOKS, default_args=dict(interval=log_interval))
            self.register_hook(logger_hook, priority='VERY_LOW')

    def register_timer_hook(self, timer_config):
        if (timer_config is None):
            return
        if isinstance(timer_config, dict):
            # Deep-copy so build_from_cfg's mutations don't leak to caller.
            timer_config_ = copy.deepcopy(timer_config)
            hook = mmcv.build_from_cfg(timer_config_, HOOKS)
        else:
            hook = timer_config
        self.register_hook(hook, priority='LOW')

    def register_custom_hooks(self, custom_config):
        if (custom_config is None):
            return
        if (not isinstance(custom_config, list)):
            custom_config = [custom_config]
        for item in custom_config:
            if isinstance(item, dict):
                self.register_hook_from_cfg(item)
            else:
                self.register_hook(item, priority='NORMAL')

    def register_profiler_hook(self, profiler_config):
        if (profiler_config is None):
            return
        if isinstance(profiler_config, dict):
            profiler_config.setdefault('type', 'ProfilerHook')
            hook = mmcv.build_from_cfg(profiler_config, HOOKS)
        else:
            hook = profiler_config
        self.register_hook(hook)

    def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None, momentum_config=None, timer_config=dict(type='IterTimerHook'), custom_hooks_config=None):
        'Register default and custom hooks for training.\n\n Default and custom hooks include:\n\n +----------------------+-------------------------+\n | Hooks | Priority |\n +======================+=========================+\n | LrUpdaterHook | VERY_HIGH (10) |\n +----------------------+-------------------------+\n | MomentumUpdaterHook | HIGH (30) |\n +----------------------+-------------------------+\n | OptimizerStepperHook | ABOVE_NORMAL (40) |\n +----------------------+-------------------------+\n | CheckpointSaverHook | NORMAL (50) |\n +----------------------+-------------------------+\n | IterTimerHook | LOW (70) |\n +----------------------+-------------------------+\n | LoggerHook(s) | VERY_LOW (90) |\n +----------------------+-------------------------+\n | CustomHook(s) | defaults to NORMAL (50) |\n +----------------------+-------------------------+\n\n If custom hooks have same priority with default hooks, custom hooks\n will be triggered after default hooks.\n '
        self.register_lr_hook(lr_config)
        self.register_momentum_hook(momentum_config)
        self.register_optimizer_hook(optimizer_config)
        self.register_checkpoint_hook(checkpoint_config)
        self.register_timer_hook(timer_config)
        self.register_logger_hooks(log_config)
        self.register_custom_hooks(custom_hooks_config)
|
def build_runner_constructor(cfg):
    """Build a runner constructor object from ``cfg`` via ``RUNNER_BUILDERS``."""
    return RUNNER_BUILDERS.build(cfg)
|
def build_runner(cfg, default_args=None):
    """Build a runner from config.

    The config may carry a ``constructor`` key naming a runner-constructor
    class; otherwise ``DefaultRunnerConstructor`` is used. The constructor is
    then called to produce the runner instance.

    Args:
        cfg (dict): Runner config containing at least a ``type`` key.
        default_args (dict, optional): Default keyword arguments forwarded to
            the constructor. Default: None.

    Returns:
        The constructed runner object.
    """
    # Deep-copy so popping 'constructor' never mutates the caller's config.
    cfg_copy = copy.deepcopy(cfg)
    constructor_name = cfg_copy.pop('constructor', 'DefaultRunnerConstructor')
    constructor = build_runner_constructor(
        dict(type=constructor_name, runner_cfg=cfg_copy, default_args=default_args))
    return constructor()
|
def _get_mmcv_home():
    """Return the mmcv cache directory, creating it if necessary.

    Resolution order: the ``ENV_MMCV_HOME`` environment variable, then
    ``<ENV_XDG_CACHE_HOME or DEFAULT_CACHE_DIR>/mmcv``.
    """
    cache_root = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    mmcv_home = os.getenv(ENV_MMCV_HOME, os.path.join(cache_root, 'mmcv'))
    mmcv_home = os.path.expanduser(mmcv_home)
    # Ensure the directory exists before callers try to read/write it.
    mkdir_or_exist(mmcv_home)
    return mmcv_home
|
def load_state_dict(module, state_dict, strict=False, logger=None):
    "Load state_dict to a module.\n\n This method is modified from :meth:`torch.nn.Module.load_state_dict`.\n Default value for ``strict`` is set to ``False`` and the message for\n param mismatch will be shown even if strict is False.\n\n Args:\n module (Module): Module that receives the state_dict.\n state_dict (OrderedDict): Weights.\n strict (bool): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.\n logger (:obj:`logging.Logger`, optional): Logger to log the error\n message. If not specified, print function will be used.\n "
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []
    metadata = getattr(state_dict, '_metadata', None)
    # Shallow copy so attaching _metadata does not mutate the caller's dict.
    state_dict = state_dict.copy()
    if (metadata is not None):
        state_dict._metadata = metadata

    def load(module, prefix=''):
        # Recursively load into each submodule, accumulating mismatches
        # into the closed-over lists above.
        if is_module_wrapper(module):
            module = module.module
        local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
        module._load_from_state_dict(state_dict, prefix, local_metadata, True, all_missing_keys, unexpected_keys, err_msg)
        for (name, child) in module._modules.items():
            if (child is not None):
                load(child, ((prefix + name) + '.'))
    load(module)
    # Break the reference cycle created by the recursive closure.
    load = None
    # num_batches_tracked (BatchNorm counters) are harmless to miss.
    missing_keys = [key for key in all_missing_keys if ('num_batches_tracked' not in key)]
    if unexpected_keys:
        err_msg.append(f'''unexpected key in source state_dict: {', '.join(unexpected_keys)}
''')
    if missing_keys:
        err_msg.append(f'''missing keys in source state_dict: {', '.join(missing_keys)}
''')
    (rank, _) = get_dist_info()
    # Only rank 0 reports, to avoid duplicated messages in distributed runs.
    if ((len(err_msg) > 0) and (rank == 0)):
        err_msg.insert(0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif (logger is not None):
            logger.warning(err_msg)
        else:
            print(err_msg)
|
def get_torchvision_models():
    """Collect the ``model_urls`` mappings from all torchvision model modules.

    Returns:
        dict: Mapping from torchvision model name to its checkpoint URL.
    """
    urls = dict()
    for _, module_name, is_pkg in pkgutil.walk_packages(torchvision.models.__path__):
        if is_pkg:
            continue
        zoo_module = import_module(f'torchvision.models.{module_name}')
        # Modules without a model_urls attribute contribute nothing.
        urls.update(getattr(zoo_module, 'model_urls', {}))
    return urls
|
def get_external_models():
    """Load the open-mmlab model-zoo URL mapping.

    The default mapping shipped with mmcv may be overridden per-entry by a
    user-provided ``open_mmlab.json`` in the mmcv home directory.

    Returns:
        dict: Mapping from model name to URL or local path.
    """
    mmcv_home = _get_mmcv_home()
    default_urls = load_file(osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json'))
    assert isinstance(default_urls, dict)
    user_json_path = osp.join(mmcv_home, 'open_mmlab.json')
    if osp.exists(user_json_path):
        user_urls = load_file(user_json_path)
        assert isinstance(user_urls, dict)
        # User entries take precedence over the bundled defaults.
        default_urls.update(user_urls)
    return default_urls
|
def get_mmcls_models():
    """Load the mmcls model-zoo URL mapping bundled with mmcv."""
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    return load_file(json_path)
|
def get_deprecated_model_names():
    """Load the mapping from deprecated model names to their replacements."""
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/deprecated.json')
    mapping = load_file(json_path)
    assert isinstance(mapping, dict)
    return mapping
|
def _process_mmcls_checkpoint(checkpoint):
if ('state_dict' in checkpoint):
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
new_state_dict = OrderedDict()
for (k, v) in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
|
class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    # Maps a regex-like path prefix to its loader function, kept sorted so
    # longer (more specific) prefixes are matched first.
    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        """Store ``loader`` under each prefix, re-sorting the scheme table."""
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if ((prefix not in cls._schemes) or force):
                cls._schemes[prefix] = loader
            else:
                raise KeyError(f'{prefix} is already registered as a loader backend, add "force=True" if you want to override it')
        # Reverse-sort by prefix so the most specific scheme wins in
        # _get_checkpoint_loader's first-match scan.
        cls._schemes = OrderedDict(sorted(cls._schemes.items(), key=(lambda t: t[0]), reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """
        if (loader is not None):
            cls._register_scheme(prefixes, loader, force=force)
            return

        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls
        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Find a loader that supports the given path.

        Returns the first registered loader whose prefix matches ``path``
        (``None`` when nothing matches).

        Args:
            path (str): checkpoint path

        Returns:
            callable: checkpoint loader
        """
        for p in cls._schemes:
            # Prefixes are treated as regexes anchored at the start.
            if (re.match(p, path) is not None):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """Load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """
        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        # class_name[10:] strips the 'load_from_' prefix; include the actual
        # path in the message (it was missing from the log line before).
        mmcv.print_log(f'load checkpoint from {class_name[10:]} path: {filename}', logger)
        return checkpoint_loader(filename, map_location)
|
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """Load checkpoint by local file path.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        FileNotFoundError: If ``filename`` does not exist.
    """
    filename = osp.expanduser(filename)
    if (not osp.isfile(filename)):
        # Include the offending path in the error (it was missing before).
        raise FileNotFoundError(f'{filename} can not be found.')
    checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
|
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """Load checkpoint through HTTP or HTTPS scheme path.

    In a distributed setting only local rank 0 downloads first; the other
    ranks wait on a barrier and then load from the shared download cache.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    rank, world_size = get_dist_info()
    if rank == 0:
        # Rank 0 downloads (and populates the cache) first.
        checkpoint = load_url(filename, model_dir=model_dir, map_location=map_location)
    if world_size > 1:
        # All ranks wait until the download has finished.
        torch.distributed.barrier()
    if rank > 0:
        # Non-zero ranks now hit the already-populated cache.
        checkpoint = load_url(filename, model_dir=model_dir, map_location=map_location)
    return checkpoint
|
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """Load checkpoint through the file path prefixed with pavi.

    In a distributed setting, this function downloads the checkpoint at all
    ranks to different temporary directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        ImportError: If the ``pavi`` package is not installed.
    """
    prefix = 'pavi://'
    # Include the offending filename in the message (it was missing before).
    assert filename.startswith(prefix), f'Expected filename startswith `pavi://`, but get {filename}'
    model_path = filename[len(prefix):]
    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError('Please install pavi to load checkpoint from modelcloud.')
    model = modelcloud.get(model_path)
    with TemporaryDirectory() as tmp_dir:
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        # Load inside the context so the temp file still exists.
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint
|
@CheckpointLoader.register_scheme(prefixes='(\\S+\\:)?s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """Load checkpoint through the file path prefixed with s3.

    In a distributed setting, this function downloads the checkpoint at all
    ranks to different temporary directories.

    Note:
        Since v1.4.1, the registered scheme prefixes have been enhanced to
        support bucket names in the path prefix, e.g. 's3://xx.xx/xx.path',
        'bucket1:s3://xx.xx/xx.path'.

    Args:
        filename (str): checkpoint file path with s3 prefix
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    allowed_backends = ['ceph', 'petrel']
    if (backend not in allowed_backends):
        raise ValueError(f'Load from Backend {backend} is not supported.')
    if (backend == 'ceph'):
        warnings.warn('CephBackend will be deprecated, please use PetrelBackend instead', DeprecationWarning)
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        # Requested backend's package is missing; fall back to the other one.
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])
    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint
|
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """Load checkpoint through the file path prefixed with modelzoo or
    torchvision.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    # Slice off the scheme prefix by its length instead of magic numbers.
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please use "torchvision://" instead', DeprecationWarning)
        model_name = filename[len('modelzoo://'):]
    else:
        model_name = filename[len('torchvision://'):]
    return load_from_http(model_urls[model_name], map_location=map_location)
|
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """Load checkpoint through the file path prefixed with open-mmlab or
    openmmlab.

    Args:
        filename (str): checkpoint file path with open-mmlab or
            openmmlab prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        FileNotFoundError: If the mapped local file does not exist.
    """
    model_urls = get_external_models()
    # Slice off whichever scheme prefix is present, by its length.
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[len(prefix_str):]
    else:
        prefix_str = 'openmmlab://'
        model_name = filename[len(prefix_str):]
    deprecated_urls = get_deprecated_model_names()
    if (model_name in deprecated_urls):
        warnings.warn(f'{prefix_str}{model_name} is deprecated in favor of {prefix_str}{deprecated_urls[model_name]}', DeprecationWarning)
        model_name = deprecated_urls[model_name]
    model_url = model_urls[model_name]
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        # The mapping may point at a file relative to the mmcv home dir.
        filename = osp.join(_get_mmcv_home(), model_url)
        if (not osp.isfile(filename)):
            # Include the offending path in the error (it was missing before).
            raise FileNotFoundError(f'{filename} can not be found.')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
|
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """Load checkpoint through the file path prefixed with mmcls.

    Args:
        filename (str): checkpoint file path with mmcls prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint, converted to a
        backbone-only state dict.
    """
    model_urls = get_mmcls_models()
    model_name = filename[len('mmcls://'):]
    raw_checkpoint = load_from_http(model_urls[model_name], map_location=map_location)
    # Strip the 'backbone.' prefix so the weights load into a bare backbone.
    return _process_mmcls_checkpoint(raw_checkpoint)
|
def _load_checkpoint(filename, map_location=None, logger=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error message.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
        OrderedDict storing model weights or a dict containing other
        information, which depends on the checkpoint.
    """
    # Dispatch to whichever registered scheme matches the filename.
    return CheckpointLoader.load_checkpoint(
        filename, map_location=map_location, logger=logger)
|
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load partial pretrained model with specific prefix.

    Args:
        prefix (str): The prefix of sub-module.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint, restricted to keys under
        ``prefix`` with the prefix stripped.
    """
    checkpoint = _load_checkpoint(filename, map_location=map_location)
    if ('state_dict' in checkpoint):
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    # Normalize the prefix so 'backbone' and 'backbone.' behave the same.
    if (not prefix.endswith('.')):
        prefix += '.'
    cut = len(prefix)
    state_dict = {key[cut:]: value for key, value in state_dict.items() if key.startswith(prefix)}
    assert state_dict, f'{prefix} is not in the pretrained model'
    return state_dict
|
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None, revise_keys=[('^module\\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: If the loaded object is not a dict.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    if (not isinstance(checkpoint, dict)):
        # Include the offending filename in the error (it was missing before).
        raise RuntimeError(f'No state_dict found in checkpoint file {filename}')
    if ('state_dict' in checkpoint):
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    # Preserve _metadata across the key rewrites below; a plain dict
    # comprehension would drop it.
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for (p, r) in revise_keys:
        state_dict = OrderedDict({re.sub(p, r, k): v for (k, v) in state_dict.items()})
    state_dict._metadata = metadata
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
|
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    # Note: the original docstring wrongly said the result is "on GPU".
    state_dict_cpu = OrderedDict((key, val.cpu()) for (key, val) in state_dict.items())
    # Preserve _metadata (version info) so load_state_dict keeps working.
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu
|
def _save_to_state_dict(module, destination, prefix, keep_vars):
'Saves module state to `destination` dictionary.\n\n This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.\n\n Args:\n module (nn.Module): The module to generate state_dict.\n destination (dict): A dict where state will be stored.\n prefix (str): The prefix for parameters and buffers used in this\n module.\n '
for (name, param) in module._parameters.items():
if (param is not None):
destination[(prefix + name)] = (param if keep_vars else param.detach())
for (name, buf) in module._buffers.items():
if (buf is not None):
destination[(prefix + name)] = (buf if keep_vars else buf.detach())
|
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Return a dictionary containing the whole state of ``module``.

    Both parameters and persistent buffers (e.g. running averages) are
    included; keys are the corresponding parameter and buffer names. This is
    a modified :meth:`torch.nn.Module.state_dict` that recursively unwraps
    parallel wrappers, so nested structures such as
    ``nn.Module(nn.Module(DDP))`` are handled.

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the
            module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # Unwrap DataParallel/DDP-style wrappers before collecting state.
    if is_module_wrapper(module):
        module = module.module
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    local_metadata = dict(version=module._version)
    destination._metadata[prefix[:-1]] = local_metadata
    _save_to_state_dict(module, destination, prefix, keep_vars)
    # Recurse into submodules, extending the key prefix at each level.
    for name, child in module._modules.items():
        if child is None:
            continue
        get_state_dict(child, destination, f'{prefix}{name}.',
                       keep_vars=keep_vars)
    # State-dict hooks may post-process (and even replace) the result.
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination
|
def save_checkpoint(model, filename, optimizer=None, meta=None, file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
    if is_module_wrapper(model):
        # Unwrap DataParallel/DDP wrappers so keys are not prefixed
        # with 'module.'.
        model = model.module
    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # Save class names so inference tools can recover label meanings.
        meta.update(CLASSES=model.CLASSES)
    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        # Support multiple named optimizers.
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()
    if filename.startswith('pavi://'):
        if file_client_args is not None:
            # Fixed: the original message was missing a space before
            # "pavi://" ('starts with"pavi://"').
            raise ValueError(
                'file_client_args should be "None" if filename starts '
                f'with "pavi://", but got {file_client_args}')
        try:
            from pavi import exception, modelcloud
        except ImportError:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]  # strip the 'pavi://' scheme prefix
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            model = root.create_training_model(model_dir)
        # Serialize to a local temp file first, then upload to modelcloud.
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        file_client = FileClient.infer_client(file_client_args, filename)
        # Serialize in memory and hand the bytes to the file client, which
        # may target local disk or a remote backend.
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)
|
@RUNNER_BUILDERS.register_module()
class DefaultRunnerConstructor():
    """Default constructor for runners.

    Customize an existing ``Runner`` (e.g. ``EpochBasedRunner``) through a
    ``RunnerConstructor``: subclassing this pattern lets you inject new
    properties and functions into the built runner.

    Example:
        >>> from mmcv.runner import RUNNER_BUILDERS, build_runner
        >>> @RUNNER_BUILDERS.register_module()
        >>> class MyRunnerConstructor:
        ...     def __init__(self, runner_cfg, default_args=None):
        ...         self.runner_cfg = runner_cfg
        ...         self.default_args = default_args
        ...
        ...     def __call__(self):
        ...         runner = RUNNERS.build(self.runner_cfg,
        ...                                default_args=self.default_args)
        ...         runner.my_name = 'my_runner'
        ...         return runner
        >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
        ...                   constructor='MyRunnerConstructor')
        >>> runner = build_runner(runner_cfg)
    """

    def __init__(self, runner_cfg, default_args=None):
        # Validate eagerly so misconfiguration fails at construction time.
        if not isinstance(runner_cfg, dict):
            raise TypeError('runner_cfg should be a dict',
                            f'but got {type(runner_cfg)}')
        self.runner_cfg = runner_cfg
        self.default_args = default_args

    def __call__(self):
        # Delegate the actual construction to the RUNNERS registry.
        return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
|
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize the distributed environment for the given launcher.

    Args:
        launcher (str): One of ``'pytorch'``, ``'mpi'`` or ``'slurm'``.
        backend (str): Backend of torch.distributed. Default: ``'nccl'``.
        kwargs: Extra arguments forwarded to the launcher-specific
            initializer.

    Raises:
        ValueError: If ``launcher`` is not a supported type.
    """
    # Use 'spawn' for worker subprocesses unless a start method was
    # already chosen.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    # Early-return dispatch on the launcher type.
    if launcher == 'pytorch':
        return _init_dist_pytorch(backend, **kwargs)
    if launcher == 'mpi':
        return _init_dist_mpi(backend, **kwargs)
    if launcher == 'slurm':
        return _init_dist_slurm(backend, **kwargs)
    raise ValueError(f'Invalid launcher type: {launcher}')
|
def _init_dist_pytorch(backend, **kwargs):
    """Initialize distributed training launched by ``torch.distributed``.

    Reads the process rank from the ``RANK`` environment variable and binds
    the process to GPU ``rank % num_gpus`` so ranks on one node use
    distinct devices.
    """
    rank = int(os.environ['RANK'])
    # Round-robin GPU assignment within a node.
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)
|
def _init_dist_mpi(backend, **kwargs):
    """Initialize distributed training launched via OpenMPI.

    The rank comes from the ``OMPI_COMM_WORLD_RANK`` environment variable
    set by ``mpirun``; the process is bound to GPU ``rank % num_gpus``.
    """
    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    # Round-robin GPU assignment within a node.
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)
|
def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.

    If argument ``port`` is not specified, then the master port will be the
    system environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not
    set either, a default port ``29500`` will be used.

    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    # The first hostname in the allocation becomes the master address.
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # Port priority: explicit argument > existing MASTER_PORT > 29500.
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' not in os.environ:
        os.environ['MASTER_PORT'] = '29500'
    os.environ.setdefault('MASTER_ADDR', addr)
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
|
def get_dist_info():
    """Return ``(rank, world_size)`` of the default process group.

    Falls back to ``(0, 1)`` when torch.distributed is unavailable or has
    not been initialized, so callers work in both single- and multi-process
    settings.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
|
def master_only(func):
    """Decorator that executes ``func`` only on the master (rank-0) process.

    On every other rank the wrapped callable is a no-op and returns
    ``None``.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank != 0:
            return None
        return func(*args, **kwargs)

    return wrapper
|
def allreduce_params(params, coalesce=True, bucket_size_mb=(- 1)):
    """Allreduce parameters.

    Args:
        params (list[torch.Parameters]): List of parameters or buffers of a
            model.
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    _, world_size = get_dist_info()
    if world_size == 1:
        # Single process: nothing to synchronize.
        return
    tensors = [param.data for param in params]
    if coalesce:
        _allreduce_coalesced(tensors, world_size, bucket_size_mb)
        return
    # Non-coalesced path: average each tensor individually.
    for tensor in tensors:
        dist.all_reduce(tensor.div_(world_size))
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)):
    """Allreduce gradients.

    Args:
        params (list[torch.Parameters]): List of parameters of a model
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Only parameters that actually received a gradient take part.
    grads = [
        param.grad.data for param in params
        if param.requires_grad and param.grad is not None
    ]
    _, world_size = get_dist_info()
    if world_size == 1:
        # Single process: nothing to synchronize.
        return
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
        return
    # Non-coalesced path: average each gradient individually.
    for tensor in grads:
        dist.all_reduce(tensor.div_(world_size))
|
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=(- 1)):
    """Allreduce tensors in buckets to reduce the number of collectives.

    Tensors are grouped either by a byte budget (when ``bucket_size_mb``
    is positive) or by tensor type; each bucket is flattened into one
    contiguous tensor, all-reduced, averaged by ``world_size``, and copied
    back into the original tensors in place.
    """
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        # Group tensors of the same type into one bucket each, preserving
        # encounter order.
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()
    for bucket in buckets:
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        # Scatter the averaged values back into the source tensors.
        for tensor, synced in zip(bucket,
                                  _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
|
@RUNNERS.register_module()
class EpochBasedRunner(BaseRunner):
    """Epoch-based Runner.

    This runner trains models epoch by epoch: :meth:`run` walks the
    workflow, and :meth:`train`/:meth:`val` each consume one full pass
    over a dataloader while firing the registered hooks around every
    epoch and iteration.
    """

    def run_iter(self, data_batch, train_mode, **kwargs):
        """Process a single batch and store the result in ``self.outputs``.

        Dispatches to the legacy ``batch_processor`` callable when one is
        set, otherwise to the model's ``train_step``/``val_step`` depending
        on ``train_mode``.
        """
        if (self.batch_processor is not None):
            outputs = self.batch_processor(self.model, data_batch, train_mode=train_mode, **kwargs)
        elif train_mode:
            outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        else:
            outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('"batch_processor()" or "model.train_step()"and "model.val_step()" must return a dict')
        # Buffer the step's logging scalars so logger hooks can average them.
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs

    def train(self, data_loader, **kwargs):
        """Run one training epoch over ``data_loader``."""
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        # Total iteration budget derived from the epoch budget.
        self._max_iters = (self._max_epochs * len(self.data_loader))
        self.call_hook('before_train_epoch')
        # NOTE(review): the sleep presumably lets dataloader workers settle
        # at the epoch transition -- confirm before removing.
        time.sleep(2)
        for (i, data_batch) in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_train_iter')
            self.run_iter(data_batch, train_mode=True, **kwargs)
            self.call_hook('after_train_iter')
            self._iter += 1
        self.call_hook('after_train_epoch')
        self._epoch += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        """Run one validation epoch over ``data_loader`` (gradients off)."""
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        self.call_hook('before_val_epoch')
        # NOTE(review): same settling sleep as in ``train`` -- confirm.
        time.sleep(2)
        for (i, data_batch) in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_val_iter')
            self.run_iter(data_batch, train_mode=False)
            self.call_hook('after_val_iter')
        self.call_hook('after_val_epoch')

    def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, epochs) to specify the
                running order and epochs. E.g, [('train', 2), ('val', 1)]
                means running 2 epochs for training and 1 epoch for
                validation, iteratively.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert (len(data_loaders) == len(workflow))
        if (max_epochs is not None):
            warnings.warn('setting max_epochs in run is deprecated, please set max_epochs in runner_config', DeprecationWarning)
            self._max_epochs = max_epochs
        assert (self._max_epochs is not None), 'max_epochs must be specified during instantiation'
        # Derive the total iteration count from the first 'train' phase.
        for (i, flow) in enumerate(workflow):
            (mode, epochs) = flow
            if (mode == 'train'):
                self._max_iters = (self._max_epochs * len(data_loaders[i]))
                break
        work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')
        self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s', self.get_hook_info())
        self.logger.info('workflow: %s, max: %d epochs', workflow, self._max_epochs)
        self.call_hook('before_run')
        while (self.epoch < self._max_epochs):
            for (i, flow) in enumerate(workflow):
                (mode, epochs) = flow
                if isinstance(mode, str):
                    if (not hasattr(self, mode)):
                        raise ValueError(f'runner has no method named "{mode}" to run an epoch')
                    # ``mode`` names the epoch method, e.g. ``self.train``.
                    epoch_runner = getattr(self, mode)
                else:
                    raise TypeError('mode in workflow must be a str, but got {}'.format(type(mode)))
                for _ in range(epochs):
                    # Training stops exactly at the epoch budget even if the
                    # workflow asks for more.
                    if ((mode == 'train') and (self.epoch >= self._max_epochs)):
                        break
                    epoch_runner(data_loaders[i], **kwargs)
        # NOTE(review): brief pause before 'after_run', presumably to let
        # asynchronous hooks (e.g. loggers) flush -- confirm.
        time.sleep(1)
        self.call_hook('after_run')

    def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True):
        """Save the checkpoint.

        Args:
            out_dir (str): The directory that checkpoints are saved.
            filename_tmpl (str, optional): The checkpoint filename template,
                which contains a placeholder for the epoch number.
                Defaults to 'epoch_{}.pth'.
            save_optimizer (bool, optional): Whether to save the optimizer to
                the checkpoint. Defaults to True.
            meta (dict, optional): The meta information to be saved in the
                checkpoint. Defaults to None.
            create_symlink (bool, optional): Whether to create a symlink
                "latest.pth" to point to the latest checkpoint.
                Defaults to True.
        """
        if (meta is None):
            meta = {}
        elif (not isinstance(meta, dict)):
            raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
        if (self.meta is not None):
            meta.update(self.meta)
        # Record the 1-based epoch and the global iteration counter.
        meta.update(epoch=(self.epoch + 1), iter=self.iter)
        filename = filename_tmpl.format((self.epoch + 1))
        filepath = osp.join(out_dir, filename)
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            if (platform.system() != 'Windows'):
                mmcv.symlink(filename, dst_file)
            else:
                # Windows lacks reliable symlink support; copy instead.
                shutil.copy(filepath, dst_file)
|
@RUNNERS.register_module()
class Runner(EpochBasedRunner):
    """Deprecated name of :class:`EpochBasedRunner`.

    Kept for backward compatibility: instantiation emits a
    ``DeprecationWarning`` and otherwise behaves exactly like
    :class:`EpochBasedRunner`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn('Runner was deprecated, please use EpochBasedRunner instead', DeprecationWarning)
        super().__init__(*args, **kwargs)
|
def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively convert Tensors in ``inputs`` from ``src_type`` to
    ``dst_type``.

    Only ``torch.Tensor`` objects whose dtype equals ``src_type`` are
    converted; strings, ndarrays, ``nn.Module``s and unknown objects pass
    through unchanged. Mappings and iterables are rebuilt with the same
    container type around the converted elements.

    Args:
        inputs: Inputs that to be casted.
        src_type (torch.dtype): Source type.
        dst_type (torch.dtype): Destination type.

    Returns:
        The same structure as ``inputs``, with matching Tensors cast.
    """
    # Tensors first: cast only when the dtype matches the source type.
    if isinstance(inputs, torch.Tensor):
        if inputs.dtype == src_type:
            return inputs.to(dst_type)
        return inputs
    # Pass-through types, checked before the generic container branches
    # (``str`` is also an ``Iterable``).
    if isinstance(inputs, (nn.Module, str, np.ndarray)):
        return inputs
    if isinstance(inputs, abc.Mapping):
        return type(inputs)({
            key: cast_tensor_type(value, src_type, dst_type)
            for key, value in inputs.items()
        })
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type, dst_type) for item in inputs)
    return inputs
|
def auto_fp16(apply_to=None, out_fp32=False):
    """Decorator to enable fp16 training automatically.

    This decorator is useful when you write custom modules and want to
    support mixed precision training. If input arguments are fp32 tensors,
    they will be converted to fp16 automatically. Arguments other than fp32
    tensors are ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is
    used as the backend; otherwise, the original mmcv implementation is
    adopted.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp32 (bool): Whether to convert the output back to fp32.

    Example:

        >>> import torch.nn as nn
        >>> class MyModule1(nn.Module):
        >>>
        >>>     # Convert x and y to fp16
        >>>     @auto_fp16()
        >>>     def forward(self, x, y):
        >>>         pass

        >>> import torch.nn as nn
        >>> class MyModule2(nn.Module):
        >>>
        >>>     # convert pred to fp16
        >>>     @auto_fp16(apply_to=('pred', ))
        >>>     def do_something(self, pred, others):
        >>>         pass
    """
    def auto_fp16_wrapper(old_func):
        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # The decorated callable must be a bound nn.Module method: the
            # fp16 on/off switch lives on the module (``fp16_enabled``).
            if (not isinstance(args[0], torch.nn.Module)):
                raise TypeError('@auto_fp16 can only be used to decorate the method of nn.Module')
            if (not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled)):
                return old_func(*args, **kwargs)
            # Map positional arguments to their declared names so the
            # ``apply_to`` filter can be applied by name.
            args_info = getfullargspec(old_func)
            args_to_cast = (args_info.args if (apply_to is None) else apply_to)
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for (i, arg_name) in enumerate(arg_names):
                    if (arg_name in args_to_cast):
                        # Cast only fp32 tensors (cast_tensor_type checks dtype).
                        new_args.append(cast_tensor_type(args[i], torch.float, torch.half))
                    else:
                        new_args.append(args[i])
            new_kwargs = {}
            if kwargs:
                for (arg_name, arg_value) in kwargs.items():
                    if (arg_name in args_to_cast):
                        new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, torch.half)
                    else:
                        new_kwargs[arg_name] = arg_value
            # PyTorch >= 1.6 has native AMP: run under autocast there,
            # otherwise fall back to the plain cast-based path.
            if ((TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.6.0'))):
                with autocast(enabled=True):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # Optionally cast fp16 results back to fp32 for the caller.
            if out_fp32:
                output = cast_tensor_type(output, torch.half, torch.float)
            return output
        return new_func
    return auto_fp16_wrapper
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.