code stringlengths 17 6.64M |
|---|
class FurthestPointSampling(Function):
    """Iterative furthest point sampling.

    Selects ``num_points`` indices such that the corresponding points are
    maximally spread out: each new point chosen is the one furthest from the
    set already selected.
    """

    @staticmethod
    def forward(ctx, points_xyz: torch.Tensor, num_points: int) -> torch.Tensor:
        """
        Args:
            points_xyz (torch.Tensor): (B, N, 3) where N > num_points.
            num_points (int): Number of points in the sampled set.

        Returns:
            torch.Tensor: (B, num_points) indices of the sampled points.
        """
        assert points_xyz.is_contiguous()
        batch, num_src = points_xyz.size()[:2]
        # Index output plus a per-point running-distance scratch buffer,
        # initialised to a very large value.
        output = torch.cuda.IntTensor(batch, num_points)
        temp = torch.cuda.FloatTensor(batch, num_src).fill_(10000000000.0)
        ext_module.furthest_point_sampling_forward(
            points_xyz, temp, output, b=batch, n=num_src, m=num_points)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # The op returns indices, so nothing is differentiable.
        return None, None
|
class FurthestPointSamplingWithDist(Function):
    """Iterative furthest point sampling driven by a precomputed pairwise
    distance matrix instead of raw xyz coordinates."""

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor, num_points: int) -> torch.Tensor:
        """
        Args:
            points_dist (torch.Tensor): (B, N, N) Distance between each point
                pair.
            num_points (int): Number of points in the sampled set.

        Returns:
            torch.Tensor: (B, num_points) indices of the sampled points.
        """
        assert points_dist.is_contiguous()
        batch, num_src, _ = points_dist.size()
        output = points_dist.new_zeros([batch, num_points], dtype=torch.int32)
        # Running minimum distance to the selected set; start very large.
        temp = points_dist.new_zeros([batch, num_src]).fill_(10000000000.0)
        ext_module.furthest_point_sampling_with_dist_forward(
            points_dist, temp, output, b=batch, n=num_src, m=num_points)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Index outputs carry no gradient.
        return None, None
|
class FusedBiasLeakyReLUFunctionBackward(Function):
    """Calculate second order deviation.

    This function is to compute the second order deviation for the fused
    leaky relu operation.
    """

    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        # ``out`` (the forward activation) is needed again in this
        # function's own backward pass.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        # act=3, grad=1: the extension computes the gradient of the fused
        # bias + leaky-relu op with respect to its input.
        grad_input = ext_module.fused_bias_leakyrelu(grad_output, empty, out, act=3, grad=1, alpha=negative_slope, scale=scale)
        # The bias gradient is the input gradient summed over every axis
        # except the channel axis (dim 1).
        dim = [0]
        if (grad_input.ndim > 2):
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()
        return (grad_input, grad_bias)

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        (out,) = ctx.saved_tensors
        # Second-order gradient: feed the incoming grad-of-grad tensors back
        # through the same fused kernel.
        gradgrad_out = ext_module.fused_bias_leakyrelu(gradgrad_input, gradgrad_bias.to(out.dtype), out, act=3, grad=1, alpha=ctx.negative_slope, scale=ctx.scale)
        # Only the first forward argument (grad_output) receives a gradient.
        return (gradgrad_out, None, None, None)
|
class FusedBiasLeakyReLUFunction(Function):
    """Autograd function wrapping the fused bias + leaky ReLU CUDA kernel."""

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        placeholder = input.new_empty(0)
        out = ext_module.fused_bias_leakyrelu(
            input, bias, placeholder, act=3, grad=0, alpha=negative_slope,
            scale=scale)
        # The backward kernel needs the activation output (not the input),
        # so save it together with the scalar hyper-parameters.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale)
        # No gradients for the scalar hyper-parameters.
        return grad_input, grad_bias, None, None
|
class FusedBiasLeakyReLU(nn.Module):
    """Fused bias leaky ReLU module.

    Introduced in StyleGAN2: `Analyzing and Improving the Image Quality of
    StyleGAN <http://arxiv.org/abs/1912.04958>`_.

    The bias term comes from the preceding convolution. To keep the variance
    of the feature map or gradients unchanged, a scale is applied in the
    spirit of Kaiming initialization; since :math:`1+{alpha}^2` is close to
    one it is ignored and the final scale is simply :math:`\\sqrt{2}`. You
    may pass your own scale instead.

    TODO: Implement the CPU version.

    Args:
        num_channels (int): The channel number of the feature map.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the
            feature map. Defaults to 2**0.5.
    """

    def __init__(self, num_channels, negative_slope=0.2, scale=2 ** 0.5):
        super(FusedBiasLeakyReLU, self).__init__()
        # Learnable per-channel bias, folded out of the preceding conv.
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_bias_leakyrelu(input, self.bias, self.negative_slope,
                                    self.scale)
|
def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Fused bias leaky ReLU function.

    Introduced in StyleGAN2: `Analyzing and Improving the Image Quality of
    StyleGAN <http://arxiv.org/abs/1912.04958>`_. The bias comes from the
    preceding convolution; ``scale`` (default :math:`\\sqrt{2}`) keeps the
    variance of activations/gradients roughly unchanged, in the spirit of
    Kaiming initialization.

    Args:
        input (torch.Tensor): Input feature map.
        bias (nn.Parameter): The bias from convolution operation.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the
            feature map. Defaults to 2**0.5.

    Returns:
        torch.Tensor: Feature map after non-linear activation.
    """
    if input.is_cuda:
        return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype),
                                                negative_slope, scale)
    # CPU tensors fall back to the pure-PyTorch reference implementation.
    return bias_leakyrelu_ref(input, bias, negative_slope, scale)
|
def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Pure-PyTorch reference implementation of bias + leaky ReLU.

    Adds ``bias`` broadcast over the channel axis (dim 1), applies a leaky
    ReLU, then multiplies by ``scale``.
    """
    if bias is not None:
        assert bias.ndim == 1
        assert bias.shape[0] == x.shape[1]
        # Reshape bias to (1, C, 1, ..., 1) so it broadcasts over dim 1.
        shape = [-1 if axis == 1 else 1 for axis in range(x.ndim)]
        x = x + bias.reshape(shape)
    x = F.leaky_relu(x, negative_slope)
    if scale != 1:
        x = x * scale
    return x
|
class GatherPoints(Function):
    """Gather point features along the point dimension with a given index."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): (B, C, N) features to gather.
            indices (torch.Tensor): (B, M) where M is the number of points.

        Returns:
            torch.Tensor: (B, C, M) where M is the number of points.
        """
        assert features.is_contiguous()
        assert indices.is_contiguous()
        batch, num_out = indices.size()
        _, channels, num_src = features.size()
        gathered = features.new_zeros((batch, channels, num_out))
        ext_module.gather_points_forward(
            features, indices, gathered, b=batch, c=channels, n=num_src,
            npoints=num_out)
        # Stash what backward needs to scatter the gradient back.
        ctx.for_backwards = (indices, channels, num_src)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(indices)
        return gathered

    @staticmethod
    def backward(ctx, grad_out):
        idx, channels, num_src = ctx.for_backwards
        batch, num_out = idx.size()
        grad_features = grad_out.new_zeros((batch, channels, num_src))
        grad_out_data = grad_out.data.contiguous()
        ext_module.gather_points_backward(
            grad_out_data, idx, grad_features.data, b=batch, c=channels,
            n=num_src, npoints=num_out)
        return grad_features, None
|
def get_onnxruntime_op_path():
    """Return the path of the compiled ONNX Runtime custom-op library.

    Looks for ``_ext_ort.*.so`` in the parent directory of this module's
    package and returns the first match, or an empty string when the library
    has not been built.
    """
    pkg_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    matches = glob.glob(os.path.join(pkg_root, '_ext_ort.*.so'))
    return matches[0] if matches else ''
|
def boxes_iou_bev(boxes_a, boxes_b):
    """Calculate boxes IoU in the Bird's Eye View.

    Args:
        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
        boxes_b (torch.Tensor): Input boxes b with shape (N, 5).

    Returns:
        torch.Tensor: IoU result with shape (M, N).
    """
    num_a, num_b = boxes_a.shape[0], boxes_b.shape[0]
    # Result buffer filled in-place by the extension.
    ans_iou = boxes_a.new_zeros(torch.Size((num_a, num_b)))
    ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(),
                                           boxes_b.contiguous(), ans_iou)
    return ans_iou
|
def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
    """NMS function GPU implementation (for BEV boxes).

    The overlap of two boxes for IoU calculation is defined as the exact
    overlapping area of the two boxes. ``pre_max_size`` and ``post_max_size``
    optionally cap the number of boxes before and after suppression.

    Args:
        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
            ([x1, y1, x2, y2, ry]).
        scores (torch.Tensor): Scores of boxes with the shape of [N].
        thresh (float): Overlap threshold of NMS.
        pre_max_size (int, optional): Max size of boxes before NMS.
            Default: None.
        post_max_size (int, optional): Max size of boxes after NMS.
            Default: None.

    Returns:
        torch.Tensor: Indexes after NMS.
    """
    assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]'
    # Work in descending score order so the kernel keeps the
    # highest-scoring box of each overlapping group.
    sorted_idx = scores.sort(0, descending=True)[1]
    if pre_max_size is not None:
        sorted_idx = sorted_idx[:pre_max_size]
    boxes = boxes[sorted_idx].contiguous()
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = torch.zeros(size=(), dtype=torch.long)
    ext_module.iou3d_nms_forward(boxes, keep, num_out,
                                 nms_overlap_thresh=thresh)
    # Map kept positions back to the original (pre-sort) indices.
    keep = sorted_idx[keep[:num_out].cuda(boxes.device)].contiguous()
    if post_max_size is not None:
        keep = keep[:post_max_size]
    return keep
|
def nms_normal_bev(boxes, scores, thresh):
    """Normal NMS function GPU implementation (for BEV boxes).

    The overlap of two boxes for IoU calculation is defined as the exact
    overlapping area of the two boxes WITH their yaw angle set to 0.

    Args:
        boxes (torch.Tensor): Input boxes with shape (N, 5).
        scores (torch.Tensor): Scores of predicted boxes with shape (N).
        thresh (float): Overlap threshold of NMS.

    Returns:
        torch.Tensor: Remaining indices with scores in descending order.
    """
    assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]'
    sorted_idx = scores.sort(0, descending=True)[1]
    boxes = boxes[sorted_idx].contiguous()
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = torch.zeros(size=(), dtype=torch.long)
    ext_module.iou3d_nms_normal_forward(boxes, keep, num_out,
                                        nms_overlap_thresh=thresh)
    # Translate kept positions in the sorted array back to original indices.
    return sorted_idx[keep[:num_out].cuda(boxes.device)].contiguous()
|
class KNN(Function):
    """KNN (CUDA) based on heap data structure.

    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/pointops/src/knnquery_heap>`_.

    Find k-nearest points.
    """

    @staticmethod
    def forward(ctx, k: int, xyz: torch.Tensor, center_xyz: torch.Tensor = None, transposed: bool = False) -> torch.Tensor:
        """
        Args:
            k (int): number of nearest neighbors.
            xyz (torch.Tensor): (B, N, 3) if transposed is False, else
                (B, 3, N). xyz coordinates of the features.
            center_xyz (torch.Tensor, optional): (B, npoint, 3) if transposed
                is False, else (B, 3, npoint). Centers of the knn query.
                Default: None.
            transposed (bool, optional): whether the input tensors are
                transposed. Should not explicitly use this keyword when
                calling knn (=KNN.apply), just add the fourth param.
                Default: False.

        Returns:
            torch.Tensor: (B, k, npoint) tensor with the indices of the
            features that form the k-nearest neighbours.
        """
        assert (k > 0) & (k < 100), 'k should be in range(0, 100)'
        if center_xyz is None:
            # Query every point against the full cloud.
            center_xyz = xyz
        if transposed:
            xyz = xyz.transpose(2, 1).contiguous()
            center_xyz = center_xyz.transpose(2, 1).contiguous()
        assert xyz.is_contiguous()
        assert center_xyz.is_contiguous()
        center_xyz_device = center_xyz.get_device()
        assert center_xyz_device == xyz.get_device(), \
            'center_xyz and xyz should be put on the same device'
        if torch.cuda.current_device() != center_xyz_device:
            torch.cuda.set_device(center_xyz_device)
        batch, num_center, _ = center_xyz.shape
        num_src = xyz.shape[1]
        idx = center_xyz.new_zeros((batch, num_center, k)).int()
        dist2 = center_xyz.new_zeros((batch, num_center, k)).float()
        ext_module.knn_forward(xyz, center_xyz, idx, dist2,
                               b=batch, n=num_src, m=num_center, nsample=k)
        # (B, npoint, k) -> (B, k, npoint)
        idx = idx.transpose(2, 1).contiguous()
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Indices are not differentiable.
        return None, None, None
|
class BaseMergeCell(nn.Module):
    """The basic class for cells used in NAS-FPN and NAS-FCOS.

    BaseMergeCell takes 2 inputs. After applying convolution on them, they
    are resized to the target size. Then, they go through binary_op, which
    depends on the type of cell. If with_out_conv is True, the result of
    output will go through another convolution layer.

    Args:
        fused_channels (int): number of input channels in out_conv layer.
        out_channels (int): number of output channels in out_conv layer.
        with_out_conv (bool): Whether to use out_conv layer
        out_conv_cfg (dict): Config dict for convolution layer, which should
            contain "groups", "kernel_size", "padding", "bias" to build
            out_conv layer.
        out_norm_cfg (dict): Config dict for normalization layer in out_conv.
        out_conv_order (tuple): The order of conv/norm/activation layers in
            out_conv.
        with_input1_conv (bool): Whether to use convolution on input1.
        with_input2_conv (bool): Whether to use convolution on input2.
        input_conv_cfg (dict): Config dict for building input1_conv layer
            and input2_conv layer, which is expected to contain the type of
            convolution. Default: None, which means using conv2d.
        input_norm_cfg (dict): Config dict for normalization layer in
            input1_conv and input2_conv layer. Default: None.
        upsample_mode (str): Interpolation method used to resize the output
            of input1_conv and input2_conv to target size. Currently, we
            support ['nearest', 'bilinear']. Default: 'nearest'.
    """

    def __init__(self, fused_channels=256, out_channels=256, with_out_conv=True, out_conv_cfg=dict(groups=1, kernel_size=3, padding=1, bias=True), out_norm_cfg=None, out_conv_order=('act', 'conv', 'norm'), with_input1_conv=False, with_input2_conv=False, input_conv_cfg=None, input_norm_cfg=None, upsample_mode='nearest'):
        super(BaseMergeCell, self).__init__()
        assert (upsample_mode in ['nearest', 'bilinear'])
        self.with_out_conv = with_out_conv
        self.with_input1_conv = with_input1_conv
        self.with_input2_conv = with_input2_conv
        self.upsample_mode = upsample_mode
        if self.with_out_conv:
            self.out_conv = ConvModule(fused_channels, out_channels, **out_conv_cfg, norm_cfg=out_norm_cfg, order=out_conv_order)
        # An empty nn.Sequential() acts as an identity mapping.
        self.input1_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input1_conv else nn.Sequential())
        self.input2_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input2_conv else nn.Sequential())

    def _build_input_conv(self, channel, conv_cfg, norm_cfg):
        # 3x3 conv with padding 1: spatial size preserved.
        return ConvModule(channel, channel, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=True)

    @abstractmethod
    def _binary_op(self, x1, x2):
        # Combines the two resized inputs; implemented by subclasses.
        pass

    def _resize(self, x, size):
        # Bring feature map ``x`` to spatial ``size`` (h, w): upsample via
        # interpolation, downsample via max pooling.
        if (x.shape[(- 2):] == size):
            return x
        elif (x.shape[(- 2):] < size):
            # NOTE(review): this is a lexicographic tuple comparison; for a
            # mixed case (one dim larger, one smaller) the branch is chosen
            # by the first differing dim — presumably inputs always differ
            # in both dims the same way. TODO confirm.
            return F.interpolate(x, size=size, mode=self.upsample_mode)
        else:
            # Downsampling requires an integer stride in both dims; the
            # kernel size is derived from the width ratio only.
            assert (((x.shape[(- 2)] % size[(- 2)]) == 0) and ((x.shape[(- 1)] % size[(- 1)]) == 0))
            kernel_size = (x.shape[(- 1)] // size[(- 1)])
            x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
            return x

    def forward(self, x1, x2, out_size=None):
        assert (x1.shape[:2] == x2.shape[:2])
        assert ((out_size is None) or (len(out_size) == 2))
        if (out_size is None):
            # Default target: the (lexicographically) larger of the two
            # input spatial sizes.
            out_size = max(x1.size()[2:], x2.size()[2:])
        x1 = self.input1_conv(x1)
        x2 = self.input2_conv(x2)
        x1 = self._resize(x1, out_size)
        x2 = self._resize(x2, out_size)
        x = self._binary_op(x1, x2)
        if self.with_out_conv:
            x = self.out_conv(x)
        return x
|
class SumCell(BaseMergeCell):
    """Merge cell that combines its two inputs by element-wise addition."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(SumCell, self).__init__(in_channels, out_channels, **kwargs)

    def _binary_op(self, x1, x2):
        return x1 + x2
|
class ConcatCell(BaseMergeCell):
    """Merge cell that concatenates its two inputs along the channel axis."""

    def __init__(self, in_channels, out_channels, **kwargs):
        # Concatenation doubles the channel count fed to the output conv.
        super(ConcatCell, self).__init__(in_channels * 2, out_channels,
                                         **kwargs)

    def _binary_op(self, x1, x2):
        return torch.cat([x1, x2], dim=1)
|
class GlobalPoolingCell(BaseMergeCell):
    """Merge cell that gates the first input with a sigmoid attention map
    derived from the second input's global average pool, then adds them."""

    def __init__(self, in_channels=None, out_channels=None, **kwargs):
        super().__init__(in_channels, out_channels, **kwargs)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

    def _binary_op(self, x1, x2):
        # Sigmoid-squashed global context of x2 modulates x1.
        attention = self.global_pool(x2).sigmoid()
        return x2 + attention * x1
|
def min_area_polygons(pointsets):
    """Find the smallest polygons that surrounds all points in the point
    sets.

    Args:
        pointsets (Tensor): point sets with shape (N, 18).

    Returns:
        torch.Tensor: Return the smallest polygons with shape (N, 8).
    """
    num_sets = pointsets.size(0)
    # Output buffer filled in-place by the extension.
    polygons = pointsets.new_zeros((num_sets, 8))
    ext_module.min_area_polygons(pointsets, polygons)
    return polygons
|
class ModulatedDeformConv2dFunction(Function):
    """Autograd function for modulated deformable convolution (DCNv2)."""

    @staticmethod
    def symbolic(g, input, offset, mask, weight, bias, stride, padding, dilation, groups, deform_groups):
        # ONNX export: bias is optional, so only append it when present.
        input_tensors = [input, offset, mask, weight]
        if (bias is not None):
            input_tensors.append(bias)
        return g.op('mmcv::MMCVModulatedDeformConv2d', *input_tensors, stride_i=stride, padding_i=padding, dilation_i=dilation, groups_i=groups, deform_groups_i=deform_groups)

    @staticmethod
    def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deform_groups=1):
        """Run the modulated deformable convolution.

        Args:
            input (torch.Tensor): 4D input feature map.
            offset (torch.Tensor): per-location sampling offsets.
            mask (torch.Tensor): per-location modulation scalars.
            weight (torch.Tensor): convolution weight.
            bias (torch.Tensor, optional): convolution bias. Default: None.
            stride, padding, dilation (int or tuple): as in nn.Conv2d.
            groups (int): convolution groups.
            deform_groups (int): number of offset groups.

        Returns:
            torch.Tensor: output feature map.
        """
        if ((input is not None) and (input.dim() != 4)):
            raise ValueError(f'Expected 4D tensor as input, got {input.dim()}D tensor instead.')
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deform_groups = deform_groups
        ctx.with_bias = (bias is not None)
        if (not ctx.with_bias):
            # The extension always expects a bias tensor; pass an empty one.
            bias = input.new_empty(0)
        # Cast all tensors to a common dtype (presumably for mixed-precision
        # inputs where offset may differ from input — TODO confirm).
        input = input.type_as(offset)
        weight = weight.type_as(input)
        bias = bias.type_as(input)
        ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(ModulatedDeformConv2dFunction._output_size(ctx, input, weight))
        # Scratch buffers used internally by the extension kernel.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        ext_module.modulated_deform_conv_forward(input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # All gradients are produced by a single fused extension call; the
        # buffers are zero-initialised because the kernel writes into them.
        (input, offset, mask, weight, bias) = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        grad_output = grad_output.contiguous()
        ext_module.modulated_deform_conv_backward(input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias)
        if (not ctx.with_bias):
            grad_bias = None
        # One gradient per forward argument; scalar config args get None.
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None)

    @staticmethod
    def _output_size(ctx, input, weight):
        # Standard convolution output-size arithmetic per spatial dim.
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range((input.dim() - 2)):
            in_size = input.size((d + 2))
            pad = ctx.padding[d]
            kernel = ((ctx.dilation[d] * (weight.size((d + 2)) - 1)) + 1)
            stride_ = ctx.stride[d]
            output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
        if (not all(map((lambda s: (s > 0)), output_size))):
            raise ValueError((('convolution input is too small (output would be ' + 'x'.join(map(str, output_size))) + ')'))
        return output_size
|
class ModulatedDeformConv2d(nn.Module):
    """Modulated deformable convolution layer (DCNv2).

    Takes the offset and modulation mask as extra ``forward`` inputs, unlike
    :class:`ModulatedDeformConv2dPack` which predicts them internally.
    """

    @deprecated_api_warning({'deformable_groups': 'deform_groups'}, cls_name='ModulatedDeformConv2d')
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deform_groups=1, bias=True):
        super(ModulatedDeformConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deform_groups = deform_groups
        # Attributes expected by code that treats this like a conv layer.
        self.transposed = False
        self.output_padding = _single(0)
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups,
                         *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.init_weights()

    def init_weights(self):
        # Uniform initialisation scaled by fan-in; zero bias.
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        bound = 1.0 / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, offset, mask):
        return modulated_deform_conv2d(x, offset, mask, self.weight,
                                       self.bias, self.stride, self.padding,
                                       self.dilation, self.groups,
                                       self.deform_groups)
|
@CONV_LAYERS.register_module('DCNv2')
class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
    """A ModulatedDeformable Conv Encapsulation that acts as normal Conv
    layers.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int): Same as nn.Conv2d, while tuple is not supported.
        padding (int): Same as nn.Conv2d, while tuple is not supported.
        dilation (int): Same as nn.Conv2d, while tuple is not supported.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
    """

    # Bumped when the state-dict layout changes; consumed by
    # _load_from_state_dict to upgrade old checkpoints.
    _version = 2

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs)
        # A single conv predicts, per kernel position and deform group,
        # two offset channels (x, y) plus one modulation channel — hence
        # the factor of 3.
        self.conv_offset = nn.Conv2d(self.in_channels, (((self.deform_groups * 3) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation, bias=True)
        self.init_weights()

    def init_weights(self):
        super(ModulatedDeformConv2dPack, self).init_weights()
        # Zero-init offsets/mask so the layer starts as a plain conv.
        if hasattr(self, 'conv_offset'):
            self.conv_offset.weight.data.zero_()
            self.conv_offset.bias.data.zero_()

    def forward(self, x):
        out = self.conv_offset(x)
        # Split predictions: two offset halves plus the modulation mask.
        (o1, o2, mask) = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        # Squash the mask to (0, 1) to modulate each sampling point.
        mask = torch.sigmoid(mask)
        return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deform_groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Migrate version-1 checkpoints, which stored the offset conv under
        # '<name>_offset', to the current 'conv_offset' key.
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            if (((prefix + 'conv_offset.weight') not in state_dict) and ((prefix[:(- 1)] + '_offset.weight') in state_dict)):
                state_dict[(prefix + 'conv_offset.weight')] = state_dict.pop((prefix[:(- 1)] + '_offset.weight'))
            if (((prefix + 'conv_offset.bias') not in state_dict) and ((prefix[:(- 1)] + '_offset.bias') in state_dict)):
                state_dict[(prefix + 'conv_offset.bias')] = state_dict.pop((prefix[:(- 1)] + '_offset.bias'))
        if ((version is not None) and (version > 1)):
            print_log(f"ModulatedDeformConvPack {prefix.rstrip('.')} is upgraded to version 2.", logger='root')
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
|
def pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold):
    """Group pixels into text instances, which is widely used text detection
    methods.

    Arguments:
        score (np.array or torch.Tensor): The foreground score with size hxw.
        mask (np.array or Tensor): The foreground mask with size hxw.
        embedding (np.array or torch.Tensor): The embedding with size hxwxc
            to distinguish instances.
        kernel_label (np.array or torch.Tensor): The instance kernel index
            with size hxw.
        kernel_contour (np.array or torch.Tensor): The kernel contour with
            size hxw.
        kernel_region_num (int): The instance kernel region number.
        distance_threshold (float): The embedding distance threshold between
            kernel and pixel in one instance.

    Returns:
        list[list[float]]: The instance coordinates and attributes list.
        Each element consists of averaged confidence, pixel number, and
        coordinates (x_i, y_i for all pixels) in order.
    """
    assert isinstance(score, (torch.Tensor, np.ndarray))
    assert isinstance(mask, (torch.Tensor, np.ndarray))
    assert isinstance(embedding, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_contour, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_region_num, int)
    assert isinstance(distance_threshold, float)
    # Normalize all array inputs to torch tensors for the extension call.
    if isinstance(score, np.ndarray):
        score = torch.from_numpy(score)
    if isinstance(mask, np.ndarray):
        mask = torch.from_numpy(mask)
    if isinstance(embedding, np.ndarray):
        embedding = torch.from_numpy(embedding)
    if isinstance(kernel_label, np.ndarray):
        kernel_label = torch.from_numpy(kernel_label)
    if isinstance(kernel_contour, np.ndarray):
        kernel_contour = torch.from_numpy(kernel_contour)
    if (torch.__version__ == 'parrots'):
        # Parrots returns one flat list: the first kernel_region_num entries
        # are per-instance pixel counts, followed by the packed coordinates.
        label = ext_module.pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num=kernel_region_num, distance_threshold=distance_threshold)
        label = label.tolist()
        label = label[0]
        list_index = kernel_region_num
        pixel_assignment = []
        for x in range(kernel_region_num):
            # Bug fix: ``np.float`` was deprecated in NumPy 1.20 and removed
            # in 1.24; it was an alias of the builtin ``float`` (float64),
            # so ``np.float64`` preserves the original dtype exactly.
            pixel_assignment.append(
                np.array(label[list_index:(list_index + int(label[x]))],
                         dtype=np.float64))
            list_index = (list_index + int(label[x]))
    else:
        pixel_assignment = ext_module.pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold)
    return pixel_assignment
|
def points_in_boxes_part(points, boxes):
    """Find the box in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH
            coordinate.
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in
            LiDAR/DEPTH coordinate, (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M). Default background = -1.
    """
    assert points.shape[0] == boxes.shape[0], f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert boxes.shape[2] == 7, f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert points.shape[2] == 3, f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    batch_size, num_points, _ = points.shape
    # -1 marks points that fall inside no box.
    box_indices = points.new_zeros((batch_size, num_points),
                                   dtype=torch.int).fill_(-1)
    device_id = points.get_device()
    assert device_id == boxes.get_device(), 'Points and boxes should be put on the same device'
    if torch.cuda.current_device() != device_id:
        torch.cuda.set_device(device_id)
    ext_module.points_in_boxes_part_forward(boxes.contiguous(),
                                            points.contiguous(), box_indices)
    return box_indices
|
def points_in_boxes_cpu(points, boxes):
    """Find all boxes in which each point is (CPU). The CPU version of
    :meth:`points_in_boxes_all`.

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in
            LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M, T). Default background = 0.
    """
    assert points.shape[0] == boxes.shape[0], f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert boxes.shape[2] == 7, f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert points.shape[2] == 3, f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    batch_size, num_points, _ = points.shape
    num_boxes = boxes.shape[1]
    point_indices = points.new_zeros((batch_size, num_boxes, num_points),
                                     dtype=torch.int)
    # The extension processes one batch element at a time.
    for batch_idx in range(batch_size):
        ext_module.points_in_boxes_cpu_forward(
            boxes[batch_idx].float().contiguous(),
            points[batch_idx].float().contiguous(),
            point_indices[batch_idx])
    # (B, T, M) -> (B, M, T)
    return point_indices.transpose(1, 2)
|
def points_in_boxes_all(points, boxes):
    """Find all boxes in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH
            coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M, T). Default background = 0.
    """
    # Bug fix: the original message interpolated ``boxes.shape[0]`` twice,
    # so a real mismatch printed two identical numbers. Now reports both
    # batch sizes, consistent with points_in_boxes_part.
    assert points.shape[0] == boxes.shape[0], f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert boxes.shape[2] == 7, f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert points.shape[2] == 3, f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    batch_size, num_points, _ = points.shape
    num_boxes = boxes.shape[1]
    # 0 marks background (point belongs to no box).
    box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes),
                                       dtype=torch.int).fill_(0)
    points_device = points.get_device()
    assert points_device == boxes.get_device(), 'Points and boxes should be put on the same device'
    if torch.cuda.current_device() != points_device:
        torch.cuda.set_device(points_device)
    ext_module.points_in_boxes_all_forward(boxes.contiguous(),
                                           points.contiguous(),
                                           box_idxs_of_pts)
    return box_idxs_of_pts
|
def points_in_polygons(points, polygons):
    """Judging whether points are inside polygons, which is used in the ATSS
    assignment for the rotated boxes.

    It should be noted that when the point is just at the polygon boundary,
    the judgment will be inaccurate, but the effect on assignment is
    limited.

    Args:
        points (torch.Tensor): It has shape (B, 2), indicating (x, y).
            M means the number of predicted points.
        polygons (torch.Tensor): It has shape (M, 8), indicating
            (x1, y1, x2, y2, x3, y3, x4, y4). M means the number of
            ground truth polygons.

    Returns:
        torch.Tensor: Return the result with the shape of (B, M),
        1 indicates that the point is inside the polygon,
        0 indicates that the point is outside the polygon.
    """
    assert points.shape[1] == 2, f'points dimension should be 2, but got unexpected shape {points.shape[1]}'
    assert polygons.shape[1] == 8, f'polygons dimension should be 8, but got unexpected shape {polygons.shape[1]}'
    num_pts, num_polys = points.shape[0], polygons.shape[0]
    # Result buffer on GPU, filled in-place by the extension.
    output = torch.full([num_pts, num_polys], 0.0).cuda().float()
    ext_module.points_in_polygons_forward(points.contiguous(),
                                          polygons.contiguous(), output)
    return output
|
class PSAMaskFunction(Function):
    """Autograd function for the PSANet point-wise spatial attention mask."""

    @staticmethod
    def symbolic(g, input, psa_type, mask_size):
        return g.op('mmcv::MMCVPSAMask', input, psa_type_i=psa_type,
                    mask_size_i=mask_size)

    @staticmethod
    def forward(ctx, input, psa_type, mask_size):
        ctx.psa_type = psa_type
        ctx.mask_size = _pair(mask_size)
        ctx.save_for_backward(input)
        h_mask, w_mask = ctx.mask_size
        batch_size, channels, h_feature, w_feature = input.size()
        # The input must carry exactly one channel per mask position.
        assert channels == h_mask * w_mask
        output = input.new_zeros(
            (batch_size, h_feature * w_feature, h_feature, w_feature))
        ext_module.psamask_forward(
            input, output, psa_type=psa_type, num_=batch_size,
            h_feature=h_feature, w_feature=w_feature, h_mask=h_mask,
            w_mask=w_mask, half_h_mask=(h_mask - 1) // 2,
            half_w_mask=(w_mask - 1) // 2)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input = ctx.saved_tensors[0]
        psa_type = ctx.psa_type
        h_mask, w_mask = ctx.mask_size
        batch_size, channels, h_feature, w_feature = input.size()
        grad_input = grad_output.new_zeros(
            (batch_size, channels, h_feature, w_feature))
        ext_module.psamask_backward(
            grad_output, grad_input, psa_type=psa_type, num_=batch_size,
            h_feature=h_feature, w_feature=w_feature, h_mask=h_mask,
            w_mask=w_mask, half_h_mask=(h_mask - 1) // 2,
            half_w_mask=(w_mask - 1) // 2)
        return grad_input, None, None, None
|
class PSAMask(nn.Module):
    """Module wrapper around the PSANet attention-mask operator.

    Args:
        psa_type (str): Either 'collect' or 'distribute'.
        mask_size (tuple, optional): (h, w) size of the attention mask.
            Default: None.
    """

    def __init__(self, psa_type, mask_size=None):
        super(PSAMask, self).__init__()
        assert psa_type in ['collect', 'distribute']
        # The extension expects an integer enum: 0 = collect, 1 = distribute.
        self.psa_type_enum = 0 if psa_type == 'collect' else 1
        self.mask_size = mask_size
        self.psa_type = psa_type

    def forward(self, input):
        return psa_mask(input, self.psa_type_enum, self.mask_size)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(psa_type={self.psa_type}, '
        s += f'mask_size={self.mask_size})'
        return s
|
class RiRoIAlignRotatedFunction(Function):
    """Autograd function for rotation-invariant rotated RoI align.

    Pools rotated RoIs from a (N, C, H, W) feature map into fixed-size
    (num_rois, C, out_h, out_w) outputs via the ``riroi_align_rotated``
    extension ops. Gradients propagate to ``features`` only.
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, num_samples=0,
                num_orientations=8, clockwise=False):
        """
        Args:
            features (torch.Tensor): (N, C, H, W) input feature map.
            rois (torch.Tensor): RoIs to pool (no gradient is computed
                for them).
            out_size (int | tuple[int, int]): Output (h, w) per RoI.
            spatial_scale (float): Scale mapping RoI coords to the feature
                map.
            num_samples (int): Samples per output bin (0 = dense).
            num_orientations (int): Number of oriented channels.
            clockwise (bool): Angle direction convention.

        Returns:
            torch.Tensor: (num_rois, C, out_h, out_w) pooled features.
        """
        # Normalize ``out_size`` to a pair of ints.
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif is_tuple_of(out_size, int):
            assert (len(out_size) == 2)
            (out_h, out_w) = out_size
        else:
            raise TypeError(
                f'"out_size" should be an integer or tuple of integers, but got {out_size}')
        # Stash configuration for the backward pass.
        ctx.spatial_scale = spatial_scale
        ctx.num_samples = num_samples
        ctx.num_orientations = num_orientations
        ctx.clockwise = clockwise
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        (batch_size, num_channels, _, _) = features.size()
        num_rois = rois.size(0)
        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
        ext_module.riroi_align_rotated_forward(
            features, rois, output, pooled_height=out_h, pooled_width=out_w,
            spatial_scale=spatial_scale, num_samples=num_samples,
            num_orientations=num_orientations, clockwise=clockwise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Scatter ``grad_output`` back onto the input feature map."""
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        num_orientations = ctx.num_orientations
        clockwise = ctx.clockwise
        num_samples = ctx.num_samples
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        (batch_size, num_channels, feature_h, feature_w) = feature_size
        # Output spatial size is recovered from the gradient itself.
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, feature_h,
                                        feature_w)
            ext_module.riroi_align_rotated_backward(
                grad_output.contiguous(), rois, grad_input,
                pooled_height=out_h, pooled_width=out_w,
                spatial_scale=spatial_scale, num_samples=num_samples,
                num_orientations=num_orientations, clockwise=clockwise)
        # One gradient slot per forward argument; only features gets one.
        return (grad_input, grad_rois, None, None, None, None, None)
|
class RiRoIAlignRotated(nn.Module):
    """Rotation-invariant RoI align pooling layer for rotated proposals.

    Accepts a feature map of shape (N, C, H, W) and rois of shape (n, 6),
    each decoded as (batch_index, center_x, center_y, w, h, angle), with
    the angle in radian.

    Described in `ReDet: A Rotation-equivariant Detector for Aerial Object
    Detection <https://arxiv.org/abs/2103.07733>`_.

    Args:
        out_size (tuple): Fixed-size RoI output of shape (h, w).
        spatial_scale (float): Scale the input boxes by this number.
        num_samples (int): Number of input samples per output sample;
            0 samples densely.
        num_orientations (int): Number of oriented channels.
        clockwise (bool): If True, angles are clockwise in image space,
            otherwise counterclockwise. Default: False.
    """

    def __init__(self, out_size, spatial_scale, num_samples=0,
                 num_orientations=8, clockwise=False):
        super(RiRoIAlignRotated, self).__init__()
        self.clockwise = clockwise
        self.num_orientations = int(num_orientations)
        self.num_samples = int(num_samples)
        self.spatial_scale = float(spatial_scale)
        self.out_size = out_size

    def forward(self, features, rois):
        return RiRoIAlignRotatedFunction.apply(features, rois, self.out_size,
                                               self.spatial_scale,
                                               self.num_samples,
                                               self.num_orientations,
                                               self.clockwise)
|
class RoIAlignFunction(Function):
    """Autograd function for RoI align with 'avg' or 'max' per-bin pooling."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
                 pool_mode, aligned):
        # Prefer the mmcv custom ONNX op when its runtime is loaded;
        # otherwise emulate with the standard ONNX ``RoiAlign`` op.
        from ..onnx import is_custom_op_loaded
        has_custom_op = is_custom_op_loaded()
        if has_custom_op:
            return g.op(
                'mmcv::MMCVRoiAlign', input, rois,
                output_height_i=output_size[0], output_width_i=output_size[1],
                spatial_scale_f=spatial_scale, sampling_ratio_i=sampling_ratio,
                mode_s=pool_mode, aligned_i=aligned)
        else:
            from torch.onnx import TensorProtoDataType
            from torch.onnx.symbolic_helper import _slice_helper
            from torch.onnx.symbolic_opset9 import squeeze, sub
            # Standard RoiAlign takes batch indices separately from boxes:
            # split the (idx, x1, y1, x2, y2) rois accordingly.
            batch_indices = _slice_helper(
                g, rois, axes=[1], starts=[0], ends=[1])
            batch_indices = squeeze(g, batch_indices, 1)
            batch_indices = g.op(
                'Cast', batch_indices, to_i=TensorProtoDataType.INT64)
            rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
            if aligned:
                # Emulate aligned=True by shifting the boxes by -0.5 (in
                # input coordinates) before the un-aligned standard op.
                aligned_offset = g.op(
                    'Constant',
                    value_t=torch.tensor([(0.5 / spatial_scale)],
                                         dtype=torch.float32))
                rois = sub(g, rois, aligned_offset)
            return g.op(
                'RoiAlign', input, rois, batch_indices,
                output_height_i=output_size[0], output_width_i=output_size[1],
                spatial_scale_f=spatial_scale,
                sampling_ratio_i=max(0, sampling_ratio), mode_s=pool_mode)

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale=1.0,
                sampling_ratio=0, pool_mode='avg', aligned=True):
        """
        Args:
            input (torch.Tensor): (N, C, H, W) feature map.
            rois (torch.Tensor): (n, 5) boxes as (batch_idx, x1, y1, x2, y2).
            output_size (int | tuple[int, int]): Pooled (h, w).
            spatial_scale (float): Scale mapping box coords to the feature
                map. Default: 1.0.
            sampling_ratio (int): Samples per bin (0 = dense). Default: 0.
            pool_mode (str): 'avg' or 'max'. Default: 'avg'.
            aligned (bool): Shift sampling by -0.5 for exact alignment.
                Default: True.

        Returns:
            torch.Tensor: (n, C, out_h, out_w) pooled features.
        """
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        assert (pool_mode in ('max', 'avg'))
        ctx.pool_mode = (0 if (pool_mode == 'max') else 1)
        ctx.aligned = aligned
        ctx.input_shape = input.size()
        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
                        ctx.output_size[1])
        output = input.new_zeros(output_shape)
        if (ctx.pool_mode == 0):
            # Max pooling needs argmax coordinates for the backward pass.
            argmax_y = input.new_zeros(output_shape)
            argmax_x = input.new_zeros(output_shape)
        else:
            argmax_y = input.new_zeros(0)
            argmax_x = input.new_zeros(0)
        ext_module.roi_align_forward(
            input, rois, output, argmax_y, argmax_x,
            aligned_height=ctx.output_size[0],
            aligned_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio, pool_mode=ctx.pool_mode,
            aligned=ctx.aligned)
        ctx.save_for_backward(rois, argmax_y, argmax_x)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Propagate ``grad_output`` to the input feature map only."""
        (rois, argmax_y, argmax_x) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        grad_output = grad_output.contiguous()
        ext_module.roi_align_backward(
            grad_output, rois, argmax_y, argmax_x, grad_input,
            aligned_height=ctx.output_size[0],
            aligned_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio, pool_mode=ctx.pool_mode,
            aligned=ctx.aligned)
        return (grad_input, None, None, None, None, None, None)
|
class RoIAlign(nn.Module):
    """RoI align pooling layer.

    Args:
        output_size (tuple): h, w.
        spatial_scale (float): Scale the input boxes by this number.
        sampling_ratio (int): Number of input samples to take for each
            output sample. 0 to take samples densely for current models.
        pool_mode (str, 'avg' or 'max'): Pooling mode in each bin.
        aligned (bool): If False, use the legacy implementation in
            MMDetection. If True, align the results more perfectly.
        use_torchvision (bool): Whether to use roi_align from torchvision.

    Note:
        The implementation of RoIAlign when aligned=True is modified from
        https://github.com/facebookresearch/detectron2/

        With ``aligned=True`` the RoI is appropriately scaled and shifted
        by -0.5 prior to sampling, which matches a pixel model where pixel
        centers sit at half-integer continuous coordinates. The legacy
        behaviour (``aligned=False``) skips the -0.5 shift and samples
        with a slight misalignment; the difference does not affect model
        performance when RoIAlign is used together with conv layers.
    """

    @deprecated_api_warning({'out_size': 'output_size', 'sample_num': 'sampling_ratio'}, cls_name='RoIAlign')
    def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0,
                 pool_mode='avg', aligned=True, use_torchvision=False):
        super(RoIAlign, self).__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.pool_mode = pool_mode
        self.aligned = aligned
        self.use_torchvision = use_torchvision

    def forward(self, input, rois):
        """
        Args:
            input: NCHW images.
            rois: Bx5 boxes. First column is the index into N. The other
                4 columns are xyxy.
        """
        if self.use_torchvision:
            from torchvision.ops import roi_align as tv_roi_align
            if ('aligned' in tv_roi_align.__code__.co_varnames):
                return tv_roi_align(input, rois, self.output_size,
                                    self.spatial_scale, self.sampling_ratio,
                                    self.aligned)
            else:
                if self.aligned:
                    # Older torchvision lacks ``aligned``; emulate it by
                    # shifting the boxes. BUGFIX: subtract out-of-place so
                    # the caller's ``rois`` tensor is not silently mutated
                    # (the previous in-place ``rois -= ...`` leaked the
                    # shift back to the caller).
                    rois = rois - rois.new_tensor(
                        [0.0] + [0.5 / self.spatial_scale] * 4)
                return tv_roi_align(input, rois, self.output_size,
                                    self.spatial_scale, self.sampling_ratio)
        else:
            return roi_align(input, rois, self.output_size,
                             self.spatial_scale, self.sampling_ratio,
                             self.pool_mode, self.aligned)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(output_size={self.output_size}, '
        s += f'spatial_scale={self.spatial_scale}, '
        s += f'sampling_ratio={self.sampling_ratio}, '
        s += f'pool_mode={self.pool_mode}, '
        s += f'aligned={self.aligned}, '
        s += f'use_torchvision={self.use_torchvision})'
        return s
|
class RoIAlignRotatedFunction(Function):
    """Autograd function for RoI align on rotated proposals."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
                 aligned, clockwise):
        # Normalize ``output_size`` to (out_h, out_w).
        if isinstance(output_size, int):
            out_h = output_size
            out_w = output_size
        elif isinstance(output_size, tuple):
            assert (len(output_size) == 2)
            assert isinstance(output_size[0], int)
            assert isinstance(output_size[1], int)
            (out_h, out_w) = output_size
        else:
            raise TypeError(
                '"output_size" must be an integer or tuple of integers')
        # BUGFIX: ``output_width_i`` previously exported ``out_h``, which
        # produced a wrong ONNX graph for non-square output sizes.
        return g.op(
            'mmcv::MMCVRoIAlignRotated', input, rois, output_height_i=out_h,
            output_width_i=out_w, spatial_scale_f=spatial_scale,
            sampling_ratio_i=sampling_ratio, aligned_i=aligned,
            clockwise_i=clockwise)

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale,
                sampling_ratio=0, aligned=True, clockwise=False):
        """
        Args:
            input (torch.Tensor): (N, C, H, W) feature map.
            rois (torch.Tensor): Rotated RoIs to pool (no gradient is
                computed for them).
            output_size (int | tuple[int, int]): Pooled (h, w).
            spatial_scale (float): Scale mapping box coords to the feature
                map.
            sampling_ratio (int): Samples per bin (0 = dense). Default: 0.
            aligned (bool): Shift sampling by -0.5 for exact alignment.
                Default: True.
            clockwise (bool): Angle direction convention. Default: False.

        Returns:
            torch.Tensor: (num_rois, C, out_h, out_w) pooled features.
        """
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.aligned = aligned
        ctx.clockwise = clockwise
        ctx.save_for_backward(rois)
        ctx.feature_size = input.size()
        (batch_size, num_channels, data_height, data_width) = input.size()
        num_rois = rois.size(0)
        output = input.new_zeros(num_rois, num_channels, ctx.output_size[0],
                                 ctx.output_size[1])
        ext_module.roi_align_rotated_forward(
            input, rois, output, pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio, aligned=ctx.aligned,
            clockwise=ctx.clockwise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Propagate ``grad_output`` back onto the input feature map."""
        feature_size = ctx.feature_size
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        (batch_size, num_channels, data_height, data_width) = feature_size
        # Output spatial size is recovered from the gradient itself.
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height,
                                        data_width)
            ext_module.roi_align_rotated_backward(
                grad_output.contiguous(), rois, grad_input,
                pooled_height=out_h, pooled_width=out_w,
                spatial_scale=ctx.spatial_scale,
                sampling_ratio=ctx.sampling_ratio, aligned=ctx.aligned,
                clockwise=ctx.clockwise)
        # One gradient slot per forward argument; only input gets one.
        return (grad_input, grad_rois, None, None, None, None, None)
|
class RoIAlignRotated(nn.Module):
    """RoI align pooling layer for rotated proposals.

    Accepts a feature map of shape (N, C, H, W) and rois of shape (n, 6),
    each decoded as (batch_index, center_x, center_y, w, h, angle), with
    the angle in radian.

    Args:
        output_size (tuple): (h, w).
        spatial_scale (float): Scale the input boxes by this number.
        sampling_ratio (int): Number of input samples per output sample;
            0 samples densely.
        aligned (bool): If False, use the legacy MMDetection
            implementation; if True, align the results more perfectly.
            Default: True.
        clockwise (bool): If True, angles are clockwise in image space,
            otherwise counterclockwise. Default: False.

    Note:
        The ``aligned=True`` implementation is modified from
        https://github.com/facebookresearch/detectron2/ — the RoI is
        scaled and then shifted by -0.5 before sampling, giving exact
        neighbor alignment under the half-integer pixel-center model.
        The difference does not affect model performance when used
        together with conv layers.
    """

    @deprecated_api_warning({'out_size': 'output_size', 'sample_num': 'sampling_ratio'}, cls_name='RoIAlignRotated')
    def __init__(self, output_size, spatial_scale, sampling_ratio=0,
                 aligned=True, clockwise=False):
        super(RoIAlignRotated, self).__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.aligned = aligned
        self.clockwise = clockwise

    def forward(self, input, rois):
        return RoIAlignRotatedFunction.apply(input, rois, self.output_size,
                                             self.spatial_scale,
                                             self.sampling_ratio,
                                             self.aligned, self.clockwise)

    def __repr__(self):
        attrs = (f'output_size={self.output_size}',
                 f'spatial_scale={self.spatial_scale}',
                 f'sampling_ratio={self.sampling_ratio}',
                 f'aligned={self.aligned}',
                 f'clockwise={self.clockwise}')
        return f"{self.__class__.__name__}({', '.join(attrs)})"
|
class RoIPoolFunction(Function):
    """Autograd function for (max) RoI pooling."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale):
        # Maps directly onto the standard ONNX ``MaxRoiPool`` op.
        return g.op(
            'MaxRoiPool', input, rois, pooled_shape_i=output_size,
            spatial_scale_f=spatial_scale)

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale=1.0):
        """
        Args:
            input (torch.Tensor): (N, C, H, W) feature map.
            rois (torch.Tensor): (n, 5) boxes as (batch_idx, x1, y1, x2, y2).
            output_size (int | tuple[int, int]): Pooled (h, w).
            spatial_scale (float): Scale mapping box coords to the feature
                map. Default: 1.0.

        Returns:
            torch.Tensor: (n, C, out_h, out_w) pooled features.
        """
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
                        ctx.output_size[1])
        output = input.new_zeros(output_shape)
        # Per-cell argmax indices, required by the backward pass.
        argmax = input.new_zeros(output_shape, dtype=torch.int)
        ext_module.roi_pool_forward(
            input, rois, output, argmax, pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale)
        ctx.save_for_backward(rois, argmax)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Route gradients back through the recorded argmax positions."""
        (rois, argmax) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        ext_module.roi_pool_backward(
            grad_output, rois, argmax, grad_input,
            pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale)
        return (grad_input, None, None, None)
|
class RoIPool(nn.Module):
    """RoI (max) pooling layer.

    Args:
        output_size (int | tuple[int, int]): Pooled (h, w).
        spatial_scale (float): Scale the input boxes by this number.
            Default: 1.0.
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super(RoIPool, self).__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)

    def forward(self, input, rois):
        return roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(output_size={self.output_size}, '
                f'spatial_scale={self.spatial_scale})')
|
class RoIAwarePool3d(nn.Module):
    """Encode the geometry-specific features of each 3D proposal.

    Please refer to `PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_ for
    more details.

    Args:
        out_size (int or tuple): The size of output features, n or
            [n1, n2, n3].
        max_pts_per_voxel (int, optional): The maximum number of points per
            voxel. Default: 128.
        mode (str, optional): Pooling method, 'max' or 'avg'.
            Default: 'max'.
    """

    def __init__(self, out_size, max_pts_per_voxel=128, mode='max'):
        super().__init__()
        self.out_size = out_size
        self.max_pts_per_voxel = max_pts_per_voxel
        assert mode in ('max', 'avg')
        # The extension expects an integer pooling mode.
        self.mode = {'max': 0, 'avg': 1}[mode]

    def forward(self, rois, pts, pts_feature):
        """Pool point features inside each 3D RoI.

        Args:
            rois (torch.Tensor): [N, 7], in LiDAR coordinate; (x, y, z) is
                the bottom center of the rois.
            pts (torch.Tensor): [npoints, 3], coordinates of input points.
            pts_feature (torch.Tensor): [npoints, C], features of input
                points.

        Returns:
            torch.Tensor: Pooled features of shape
            [N, out_x, out_y, out_z, C].
        """
        return RoIAwarePool3dFunction.apply(rois, pts, pts_feature,
                                            self.out_size,
                                            self.max_pts_per_voxel,
                                            self.mode)
|
class RoIAwarePool3dFunction(Function):
    """Autograd function backing :class:`RoIAwarePool3d`."""

    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel,
                mode):
        """
        Args:
            rois (torch.Tensor): [N, 7], in LiDAR coordinate; (x, y, z) is
                the bottom center of the rois.
            pts (torch.Tensor): [npoints, 3], coordinates of input points.
            pts_feature (torch.Tensor): [npoints, C], features of input
                points.
            out_size (int or tuple): The size of output features, n or
                [n1, n2, n3].
            max_pts_per_voxel (int): The maximum number of points per
                voxel. Default: 128.
            mode (int): Pooling method, 0 (max pool) or 1 (average pool).

        Returns:
            torch.Tensor: Pooled features of shape
            [N, out_x, out_y, out_z, C].
        """
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert (len(out_size) == 3)
            assert mmcv.is_tuple_of(out_size, int)
            (out_x, out_y, out_z) = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[(- 1)]
        num_pts = pts.shape[0]
        pooled_features = pts_feature.new_zeros(
            (num_rois, out_x, out_y, out_z, num_channels))
        # ``argmax`` and ``pts_idx_of_voxels`` record the pooling
        # provenance needed by the backward pass.
        argmax = pts_feature.new_zeros(
            (num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros(
            (num_rois, out_x, out_y, out_z, max_pts_per_voxel),
            dtype=torch.int)
        ext_module.roiaware_pool3d_forward(
            rois, pts, pts_feature, argmax, pts_idx_of_voxels,
            pooled_features, pool_method=mode)
        # Saved directly on ctx (not save_for_backward) because plain ints
        # are stored alongside the tensors.
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode,
                                            num_pts, num_channels)
        return pooled_features

    @staticmethod
    def backward(ctx, grad_out):
        # Only ``pts_feature`` (3rd forward argument) receives a gradient.
        ret = ctx.roiaware_pool3d_for_backward
        (pts_idx_of_voxels, argmax, mode, num_pts, num_channels) = ret
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        ext_module.roiaware_pool3d_backward(
            pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in,
            pool_method=mode)
        return (None, None, grad_in, None, None, None)
|
class RoIPointPool3d(nn.Module):
    """Encode the geometry-specific features of each 3D proposal.

    Please refer to `Paper of PartA2
    <https://arxiv.org/pdf/1907.03670.pdf>`_ for more details.

    Args:
        num_sampled_points (int, optional): Number of samples in each roi.
            Default: 512.
    """

    def __init__(self, num_sampled_points=512):
        super().__init__()
        self.num_sampled_points = num_sampled_points

    def forward(self, points, point_features, boxes3d):
        """Sample and pool point features inside 3D boxes.

        Args:
            points (torch.Tensor): Input points of shape (B, N, 3).
            point_features (torch.Tensor): Features of input points, shape
                (B, N, C).
            boxes3d (torch.Tensor): Input bounding boxes of shape (B, M, 7).

        Returns:
            tuple[torch.Tensor]: Pooled features of shape
            (B, M, num_sampled_points, 3 + C) and an empty flag of shape
            (B, M).
        """
        return RoIPointPool3dFunction.apply(points, point_features, boxes3d,
                                            self.num_sampled_points)
|
class RoIPointPool3dFunction(Function):
    """Autograd function backing :class:`RoIPointPool3d` (forward only)."""

    @staticmethod
    def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
        """
        Args:
            points (torch.Tensor): Input points of shape (B, N, 3).
            point_features (torch.Tensor): Features of input points, shape
                (B, N, C).
            boxes3d (torch.Tensor): Input bounding boxes of shape (B, M, 7).
            num_sampled_points (int, optional): The number of sampled
                points. Default: 512.

        Returns:
            tuple[torch.Tensor]: Pooled features of shape
            (B, M, num_sampled_points, 3 + C) and an empty flag of shape
            (B, M).
        """
        assert ((len(points.shape) == 3) and (points.shape[2] == 3))
        (batch_size, boxes_num, feature_len) = (points.shape[0],
                                                boxes3d.shape[1],
                                                point_features.shape[2])
        pooled_boxes3d = boxes3d.view(batch_size, (- 1), 7)
        pooled_features = point_features.new_zeros(
            (batch_size, boxes_num, num_sampled_points, (3 + feature_len)))
        pooled_empty_flag = point_features.new_zeros(
            (batch_size, boxes_num)).int()
        ext_module.roipoint_pool3d_forward(
            points.contiguous(), pooled_boxes3d.contiguous(),
            point_features.contiguous(), pooled_features, pooled_empty_flag)
        return (pooled_features, pooled_empty_flag)

    @staticmethod
    def backward(ctx, grad_out):
        # Backward is intentionally unsupported for this op.
        raise NotImplementedError
|
class RotatedFeatureAlignFunction(Function):
    """Feature alignment for refined rotated anchors.

    Uses feature interpolation to obtain the position information
    corresponding to the refined rotated anchors and reconstructs the
    feature map pixel-wise to achieve feature alignment.

    Described in `R3Det: Refined Single-Stage Detector with Feature
    Refinement for Rotating Object <https://arxiv.org/abs/1908.05612>`_.
    """

    @staticmethod
    def forward(ctx, features, best_rbboxes, spatial_scale, points):
        """
        Args:
            features (torch.Tensor): Input features with shape [N, C, H, W].
            best_rbboxes (torch.Tensor): Refined rotated anchors with shape
                [N, H, W, 5], coordinate format (cx, cy, h, w, a).
            spatial_scale (float): The scale of feature map size relative
                to the input image size.
            points (int): The number of sample points; only 1 and 5 are
                supported.

        Returns:
            torch.Tensor: Refined features with shape [N, C, H, W].
        """
        ctx.spatial_scale = spatial_scale
        ctx.points = points
        ctx.save_for_backward(best_rbboxes)
        assert (points in [1, 5])
        output = torch.zeros_like(features)
        ext_module.rotated_feature_align_forward(
            features, best_rbboxes, output, spatial_scale=spatial_scale,
            points=points)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """
        Args:
            grad_output (torch.Tensor): Gradient of the output features
                with shape [N, C, H, W].

        Returns:
            tuple: Gradient of the input features ([N, C, H, W]) and
            ``None`` for the non-tensor forward arguments.
        """
        best_rbboxes = ctx.saved_tensors[0]
        points = ctx.points
        spatial_scale = ctx.spatial_scale
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = torch.zeros_like(grad_output)
            ext_module.rotated_feature_align_backward(
                grad_output.contiguous(), best_rbboxes, grad_input,
                spatial_scale=spatial_scale, points=points)
        return (grad_input, None, None, None)
|
def rotated_feature_align(features, best_rbboxes, spatial_scale=(1 / 8), points=1):
    """Functional interface of :class:`RotatedFeatureAlignFunction`.

    Args:
        features (torch.Tensor): Input features with shape [N, C, H, W].
        best_rbboxes (torch.Tensor): Refined rotated anchors with shape
            [N, H, W, 5].
        spatial_scale (float): Ratio of feature map size to input image
            size. Default: 1 / 8.
        points (int): Number of sample points; only 1 and 5 are supported.
            Default: 1.

    Returns:
        torch.Tensor: Refined features with shape [N, C, H, W].
    """
    return RotatedFeatureAlignFunction.apply(features, best_rbboxes, spatial_scale, points)
|
@CONV_LAYERS.register_module(name='SAC')
class SAConv2d(ConvAWS2d):
    """SAC (Switchable Atrous Convolution).

    This is an implementation of `DetectoRS: Detecting Objects with
    Recursive Feature Pyramid and Switchable Atrous Convolution
    <https://arxiv.org/abs/2006.02334>`_.

    The layer runs the convolution twice — once with the base dilation and
    once with padding/dilation tripled and a learned weight delta — and
    blends the two results with a per-location switch predicted from the
    input.

    Args:
        in_channels (int): Number of channels in the input image.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int or tuple): Size of the convolving kernel.
        stride (int or tuple, optional): Stride of the convolution.
            Default: 1.
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0.
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1.
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1.
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``.
        use_deform (bool): If ``True``, replace convolution with deformable
            convolution. Default: ``False``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 use_deform=False):
        super().__init__(in_channels, out_channels, kernel_size,
                         stride=stride, padding=padding, dilation=dilation,
                         groups=groups, bias=bias)
        self.use_deform = use_deform
        # Per-location gate choosing between the two dilation branches.
        self.switch = nn.Conv2d(self.in_channels, 1, kernel_size=1,
                                stride=stride, bias=True)
        # Learned delta added to the weight for the large-dilation branch.
        self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
        # Global-context 1x1 convs applied before and after the SAC body.
        self.pre_context = nn.Conv2d(self.in_channels, self.in_channels,
                                     kernel_size=1, bias=True)
        self.post_context = nn.Conv2d(self.out_channels, self.out_channels,
                                      kernel_size=1, bias=True)
        if self.use_deform:
            # Separate offset predictors for the two branches.
            self.offset_s = nn.Conv2d(self.in_channels, 18, kernel_size=3,
                                      padding=1, stride=stride, bias=True)
            self.offset_l = nn.Conv2d(self.in_channels, 18, kernel_size=3,
                                      padding=1, stride=stride, bias=True)
        self.init_weights()

    def init_weights(self):
        """Initialize the SAC-specific sub-layers (switch biased to 1,
        weight delta and offsets zeroed)."""
        constant_init(self.switch, 0, bias=1)
        self.weight_diff.data.zero_()
        constant_init(self.pre_context, 0)
        constant_init(self.post_context, 0)
        if self.use_deform:
            constant_init(self.offset_s, 0)
            constant_init(self.offset_l, 0)

    def forward(self, x):
        # Pre-context: add globally pooled features back onto the input.
        avg_x = F.adaptive_avg_pool2d(x, output_size=1)
        avg_x = self.pre_context(avg_x)
        avg_x = avg_x.expand_as(x)
        x = (x + avg_x)
        # 5x5 average over a reflect-padded input feeds the switch (and
        # the deformable offsets, when enabled).
        avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
        avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
        switch = self.switch(avg_x)
        weight = self._get_weight(self.weight)
        zero_bias = torch.zeros(self.out_channels, device=weight.device,
                                dtype=weight.dtype)
        # Branch 1: base dilation.
        if self.use_deform:
            offset = self.offset_s(avg_x)
            out_s = deform_conv2d(x, offset, weight, self.stride,
                                  self.padding, self.dilation, self.groups,
                                  1)
        elif ((TORCH_VERSION == 'parrots') or
              (digit_version(TORCH_VERSION) < digit_version('1.5.0'))):
            out_s = super().conv2d_forward(x, weight)
        elif (digit_version(TORCH_VERSION) >= digit_version('1.8.0')):
            # _conv_forward of torch>=1.8 takes an explicit bias argument.
            out_s = super()._conv_forward(x, weight, zero_bias)
        else:
            out_s = super()._conv_forward(x, weight)
        # Branch 2: temporarily triple padding/dilation and perturb the
        # weight with ``weight_diff``; the originals are restored below.
        ori_p = self.padding
        ori_d = self.dilation
        self.padding = tuple(((3 * p) for p in self.padding))
        self.dilation = tuple(((3 * d) for d in self.dilation))
        weight = (weight + self.weight_diff)
        if self.use_deform:
            offset = self.offset_l(avg_x)
            out_l = deform_conv2d(x, offset, weight, self.stride,
                                  self.padding, self.dilation, self.groups,
                                  1)
        elif ((TORCH_VERSION == 'parrots') or
              (digit_version(TORCH_VERSION) < digit_version('1.5.0'))):
            out_l = super().conv2d_forward(x, weight)
        elif (digit_version(TORCH_VERSION) >= digit_version('1.8.0')):
            out_l = super()._conv_forward(x, weight, zero_bias)
        else:
            out_l = super()._conv_forward(x, weight)
        # Blend the two branches with the switch gate, then restore state.
        out = ((switch * out_s) + ((1 - switch) * out_l))
        self.padding = ori_p
        self.dilation = ori_d
        # Post-context: add globally pooled output features.
        avg_x = F.adaptive_avg_pool2d(out, output_size=1)
        avg_x = self.post_context(avg_x)
        avg_x = avg_x.expand_as(out)
        out = (out + avg_x)
        return out
|
def _calculate_fan_in_and_fan_out_hwio(tensor):
dimensions = tensor.ndimension()
if (dimensions < 2):
raise ValueError('fan in and fan out can not be computed for tensorwith fewer than 2 dimensions')
if (dimensions == 2):
fan_in = tensor.size((- 2))
fan_out = tensor.size((- 1))
else:
num_input_fmaps = tensor.size((- 2))
num_output_fmaps = tensor.size((- 1))
receptive_field_size = 1
if (tensor.dim() > 2):
receptive_field_size = tensor[(..., 0, 0)].numel()
fan_in = (num_input_fmaps * receptive_field_size)
fan_out = (num_output_fmaps * receptive_field_size)
return (fan_in, fan_out)
|
class SparseConvolution(SparseModule):
    """Base class for sparse convolutions (regular, submanifold, transposed
    and inverse).

    The weight is stored in (*kernel_size, in_channels, out_channels)
    layout. ``forward`` consumes and produces ``SparseConvTensor`` objects,
    reusing indice pairs cached under ``indice_key`` when available.

    Args:
        ndim (int): Spatial dimension of the convolution.
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int or list/tuple): Kernel size. Default: 3.
        stride (int or list/tuple): Stride. Default: 1.
        padding (int or list/tuple): Padding. Default: 0.
        dilation (int or list/tuple): Dilation. Default: 1.
        groups (int): Must be 1. Default: 1.
        bias (bool): Whether to add a learnable bias. Default: True.
        subm (bool): Submanifold convolution (output indices equal input
            indices). Default: False.
        output_padding (int or list/tuple): Output padding for transposed
            convolution. Default: 0.
        transposed (bool): Transposed (deconvolution) mode. Default: False.
        inverse (bool): Inverse convolution reusing the indice pairs of a
            previous ("couple") convolution. Default: False.
        indice_key (str, optional): Key for caching/reusing indice pairs.
        fused_bn (bool): Use the fused conv+bias extension path.
            Default: False.
    """

    def __init__(self, ndim, in_channels, out_channels, kernel_size=3,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 subm=False, output_padding=0, transposed=False,
                 inverse=False, indice_key=None, fused_bn=False):
        super(SparseConvolution, self).__init__()
        assert (groups == 1)
        # Broadcast scalar hyper-parameters to one value per spatial dim.
        if (not isinstance(kernel_size, (list, tuple))):
            kernel_size = ([kernel_size] * ndim)
        if (not isinstance(stride, (list, tuple))):
            stride = ([stride] * ndim)
        if (not isinstance(padding, (list, tuple))):
            padding = ([padding] * ndim)
        if (not isinstance(dilation, (list, tuple))):
            dilation = ([dilation] * ndim)
        if (not isinstance(output_padding, (list, tuple))):
            output_padding = ([output_padding] * ndim)
        # Per dimension, stride and dilation must not both exceed 1.
        for (d, s) in zip(dilation, stride):
            assert any([(s == 1), (d == 1)]), "don't support this."
        self.ndim = ndim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # 1x1(x1...) kernels get a dense-matmul fast path in forward().
        self.conv1x1 = (np.prod(kernel_size) == 1)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.inverse = inverse
        self.output_padding = output_padding
        self.groups = groups
        self.subm = subm
        self.indice_key = indice_key
        self.fused_bn = fused_bn
        # Weight layout: (*kernel_size, in_channels, out_channels).
        self.weight = Parameter(
            torch.Tensor(*kernel_size, in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-uniform weight init; uniform bias based on HWIO fan-in."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if (self.bias is not None):
            (fan_in, _) = _calculate_fan_in_and_fan_out_hwio(self.weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(self.bias, (- bound), bound)

    def forward(self, input):
        """Apply the sparse convolution to a ``SparseConvTensor``."""
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Output spatial shape (unchanged for submanifold convolution).
        if (not self.subm):
            if self.transposed:
                out_spatial_shape = ops.get_deconv_output_size(
                    spatial_shape, self.kernel_size, self.stride,
                    self.padding, self.dilation, self.output_padding)
            else:
                out_spatial_shape = ops.get_conv_output_size(
                    spatial_shape, self.kernel_size, self.stride,
                    self.padding, self.dilation)
        else:
            out_spatial_shape = spatial_shape
        if self.conv1x1:
            # 1x1 kernel: a plain matmul on the features; indices are
            # unchanged, so no indice pairs are needed.
            features = torch.mm(
                input.features,
                self.weight.view(self.in_channels, self.out_channels))
            if (self.bias is not None):
                features += self.bias
            out_tensor = SparseConvTensor(features, input.indices,
                                          input.spatial_shape,
                                          input.batch_size)
            out_tensor.indice_dict = input.indice_dict
            out_tensor.grid = input.grid
            return out_tensor
        data = input.find_indice_pair(self.indice_key)
        if self.inverse:
            # Inverse conv must reuse the pairs built by its couple conv.
            assert ((data is not None) and (self.indice_key is not None))
            (_, outids, indice_pairs, indice_pair_num,
             out_spatial_shape) = data
            assert (indice_pairs.shape[0] == np.prod(self.kernel_size)), 'inverse conv must have same kernel size as its couple conv'
        elif ((self.indice_key is not None) and (data is not None)):
            # Reuse cached pairs from an earlier conv with the same key.
            (outids, _, indice_pairs, indice_pair_num, _) = data
        else:
            # Build fresh indice pairs and cache them for later layers.
            # NOTE(review): when indice_key is None this caches under the
            # key ``None`` — confirm that is intended.
            (outids, indice_pairs, indice_pair_num) = ops.get_indice_pairs(
                indices, batch_size, spatial_shape, self.kernel_size,
                self.stride, self.padding, self.dilation,
                self.output_padding, self.subm, self.transposed,
                grid=input.grid)
            input.indice_dict[self.indice_key] = (outids, indices,
                                                  indice_pairs,
                                                  indice_pair_num,
                                                  spatial_shape)
        if self.fused_bn:
            # Fused path applies the bias inside the extension call.
            assert (self.bias is not None)
            out_features = ops.fused_indice_conv(
                features, self.weight, self.bias, indice_pairs.to(device),
                indice_pair_num, outids.shape[0], self.inverse, self.subm)
        else:
            if self.subm:
                out_features = Fsp.indice_subm_conv(
                    features, self.weight, indice_pairs.to(device),
                    indice_pair_num, outids.shape[0])
            elif self.inverse:
                out_features = Fsp.indice_inverse_conv(
                    features, self.weight, indice_pairs.to(device),
                    indice_pair_num, outids.shape[0])
            else:
                out_features = Fsp.indice_conv(
                    features, self.weight, indice_pairs.to(device),
                    indice_pair_num, outids.shape[0])
            if (self.bias is not None):
                out_features += self.bias
        out_tensor = SparseConvTensor(out_features, outids,
                                      out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
|
@CONV_LAYERS.register_module()
class SparseConv2d(SparseConvolution):
    """2D sparse convolution; see :class:`SparseConvolution` for args."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super(SparseConv2d, self).__init__(
            2, in_channels, out_channels, kernel_size, stride, padding,
            dilation, groups, bias, indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConv3d(SparseConvolution):
    """Regular 3D sparse convolution, registered as a conv layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(3, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConv4d(SparseConvolution):
    """Regular 4D sparse convolution, registered as a conv layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(4, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConvTranspose2d(SparseConvolution):
    """Transposed (deconvolution-style) 2D sparse convolution."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(2, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, transposed=True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConvTranspose3d(SparseConvolution):
    """Transposed (deconvolution-style) 3D sparse convolution."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(3, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, transposed=True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv2d(SparseConvolution):
    """Inverse 2D sparse convolution; reuses the indice pairs of the
    coupled forward convolution identified by ``indice_key``."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 indice_key=None, bias=True):
        super().__init__(2, in_channels, out_channels, kernel_size,
                         bias=bias, inverse=True, indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv3d(SparseConvolution):
    """Inverse 3D sparse convolution; reuses the indice pairs of the
    coupled forward convolution identified by ``indice_key``."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 indice_key=None, bias=True):
        super().__init__(3, in_channels, out_channels, kernel_size,
                         bias=bias, inverse=True, indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv2d(SparseConvolution):
    """Submanifold 2D sparse convolution (output sites == input sites)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        # Trailing positional ``True`` enables submanifold mode.
        super().__init__(2, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv3d(SparseConvolution):
    """Submanifold 3D sparse convolution (output sites == input sites)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        # Trailing positional ``True`` enables submanifold mode.
        super().__init__(3, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, True,
                         indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv4d(SparseConvolution):
    """Submanifold 4D sparse convolution (output sites == input sites)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        # Trailing positional ``True`` enables submanifold mode.
        super().__init__(4, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias, True,
                         indice_key=indice_key)
|
class SparseConvFunction(Function):
    """Sparse Convolution.

    Please refer to `SECOND <https://www.mdpi.com/1424-8220/18/10/3337>`_ for
    more details.
    """

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run the gather-GEMM-scatter forward pass.

        Args:
            features (torch.Tensor): Features that need convolution.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between input and
                output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features,
                              filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out,
                               inverse=False)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        input_bp, filters_bp = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            inverse=False)
        # No gradients for the indexing arguments.
        return input_bp, filters_bp, None, None, None
|
class SparseInverseConvFunction(Function):
    """Autograd wrapper for the inverse sparse convolution kernels."""

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run the inverse gather-GEMM-scatter forward pass.

        Args:
            features (torch.Tensor): Features that need convolution.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between input and
                output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features,
                              filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out,
                               inverse=True, subm=False)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        input_bp, filters_bp = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            inverse=True, subm=False)
        # No gradients for the indexing arguments.
        return input_bp, filters_bp, None, None, None
|
class SubMConvFunction(Function):
    """Autograd wrapper for the submanifold sparse convolution kernels."""

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run the submanifold gather-GEMM-scatter forward pass.

        Args:
            features (torch.Tensor): Features that need convolution.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between input and
                output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features,
                              filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out,
                               inverse=False, subm=True)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        input_bp, filters_bp = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            inverse=False, subm=True)
        # No gradients for the indexing arguments.
        return input_bp, filters_bp, None, None, None
|
class SparseMaxPoolFunction(Function):
    """Autograd wrapper for the sparse max-pooling kernels."""

    @staticmethod
    def forward(ctx, features, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run the sparse max-pooling forward pass.

        Args:
            features (torch.Tensor): Features to pool.
            indice_pairs (torch.Tensor): Indice pairs between input and
                output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from sparse maxpooling.
        """
        out = ops.indice_maxpool(features, indice_pairs, indice_pair_num,
                                 num_activate_out)
        # The pooled output is needed to route gradients in backward.
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, out)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, out = ctx.saved_tensors
        input_bp = ops.indice_maxpool_backward(features, out, grad_output,
                                               indice_pairs, indice_pair_num)
        # No gradients for the indexing arguments.
        return input_bp, None, None, None
|
def is_spconv_module(module):
    """Return True if ``module`` is a SparseModule, i.e. it consumes a
    SparseConvTensor rather than a dense feature tensor."""
    return isinstance(module, (SparseModule,))
|
def is_sparse_conv(module):
    """Return True if ``module`` is a SparseConvolution layer."""
    # Imported locally to avoid a circular import with sparse_conv.
    from .sparse_conv import SparseConvolution
    return isinstance(module, SparseConvolution)
|
def _mean_update(vals, m_vals, t):
outputs = []
if (not isinstance(vals, list)):
vals = [vals]
if (not isinstance(m_vals, list)):
m_vals = [m_vals]
for (val, m_val) in zip(vals, m_vals):
output = (((t / float((t + 1))) * m_val) + ((1 / float((t + 1))) * val))
outputs.append(output)
if (len(outputs) == 1):
outputs = outputs[0]
return outputs
|
class SparseModule(nn.Module):
    """Marker base class.

    Any module deriving from this receives the SparseConvTensor itself
    (not its dense ``.features``) when run inside SparseSequential.
    """
    pass
|
class SparseSequential(SparseModule):
    """A sequential container for mixed sparse/dense modules.

    Modules are added in the order they are passed to the constructor —
    positionally, as a single ``OrderedDict``, or as keyword arguments —
    mirroring ``nn.Sequential``. In ``forward``, spconv modules receive the
    ``SparseConvTensor`` itself, while ordinary ``nn.Module``s are applied
    to its ``.features`` tensor.

    Example:
        >>> # using Sequential:
        >>> from mmcv.ops import SparseSequential
        >>> model = SparseSequential(
        ...     SparseConv2d(1, 20, 5),
        ...     nn.ReLU(),
        ...     SparseConv2d(20, 64, 5),
        ...     nn.ReLU())

        >>> # using Sequential with OrderedDict
        >>> model = SparseSequential(OrderedDict([
        ...     ('conv1', SparseConv2d(1, 20, 5)),
        ...     ('relu1', nn.ReLU())]))

        >>> # using Sequential with kwargs (python 3.6+)
        >>> model = SparseSequential(
        ...     conv1=SparseConv2d(1, 20, 5),
        ...     relu1=nn.ReLU())
    """

    def __init__(self, *args, **kwargs):
        super(SparseSequential, self).__init__()
        # A single OrderedDict argument supplies named submodules.
        if ((len(args) == 1) and isinstance(args[0], OrderedDict)):
            for (key, module) in args[0].items():
                self.add_module(key, module)
        else:
            # Positional modules are named by their index, like nn.Sequential.
            for (idx, module) in enumerate(args):
                self.add_module(str(idx), module)
        # Keyword-named modules require insertion-ordered kwargs (py3.6+).
        for (name, module) in kwargs.items():
            if (sys.version_info < (3, 6)):
                raise ValueError('kwargs only supported in py36+')
            if (name in self._modules):
                raise ValueError('name exists.')
            self.add_module(name, module)
        # Records per-layer sparsity observed during forward.
        # (NOTE: 'sparity' spelling is part of the public API.)
        self._sparity_dict = {}

    def __getitem__(self, idx):
        # Supports negative indices, like a list.
        if (not ((- len(self)) <= idx < len(self))):
            raise IndexError('index {} is out of range'.format(idx))
        if (idx < 0):
            idx += len(self)
        # _modules is an OrderedDict, so advance an iterator to position idx.
        it = iter(self._modules.values())
        for i in range(idx):
            next(it)
        return next(it)

    def __len__(self):
        return len(self._modules)

    @property
    def sparity_dict(self):
        # Per-module sparsity recorded during the last forward pass.
        return self._sparity_dict

    def add(self, module, name=None):
        """Append ``module``; an omitted name defaults to the next index."""
        if (name is None):
            name = str(len(self._modules))
        if (name in self._modules):
            raise KeyError('name exists')
        self.add_module(name, module)

    def forward(self, input):
        for (k, module) in self._modules.items():
            if is_spconv_module(module):
                # spconv modules operate on the sparse tensor directly.
                assert isinstance(input, SparseConvTensor)
                self._sparity_dict[k] = input.sparity
                input = module(input)
            elif isinstance(input, SparseConvTensor):
                # Dense modules (BN, ReLU, ...) act on the feature matrix;
                # skipped entirely when there are no active sites.
                if (input.indices.shape[0] != 0):
                    input.features = module(input.features)
            else:
                input = module(input)
        return input

    def fused(self):
        """Return a copy where each (SparseConvolution, BatchNorm1d) pair is
        folded into a single fused-BN convolution."""
        from .sparse_conv import SparseConvolution
        mods = [v for (k, v) in self._modules.items()]
        fused_mods = []
        idx = 0
        while (idx < len(mods)):
            if is_sparse_conv(mods[idx]):
                if ((idx < (len(mods) - 1)) and isinstance(mods[(idx + 1)], nn.BatchNorm1d)):
                    # Rebuild the conv with fused_bn=True and bias enabled,
                    # then copy its weights (strict=False: bias is new).
                    new_module = SparseConvolution(
                        ndim=mods[idx].ndim,
                        in_channels=mods[idx].in_channels,
                        out_channels=mods[idx].out_channels,
                        kernel_size=mods[idx].kernel_size,
                        stride=mods[idx].stride,
                        padding=mods[idx].padding,
                        dilation=mods[idx].dilation,
                        groups=mods[idx].groups,
                        bias=True,
                        subm=mods[idx].subm,
                        output_padding=mods[idx].output_padding,
                        transposed=mods[idx].transposed,
                        inverse=mods[idx].inverse,
                        indice_key=mods[idx].indice_key,
                        fused_bn=True)
                    new_module.load_state_dict(mods[idx].state_dict(), False)
                    new_module.to(mods[idx].weight.device)
                    conv = new_module
                    bn = mods[(idx + 1)]
                    conv.bias.data.zero_()
                    # Fold BN statistics into the conv weight and bias.
                    # NOTE(review): BatchNorm normalizes by sqrt(var + eps),
                    # but this folds with sqrt(var) + eps — confirm intended.
                    conv.weight.data[:] = ((conv.weight.data * bn.weight.data) / (torch.sqrt(bn.running_var) + bn.eps))
                    conv.bias.data[:] = ((((conv.bias.data - bn.running_mean) * bn.weight.data) / (torch.sqrt(bn.running_var) + bn.eps)) + bn.bias.data)
                    fused_mods.append(conv)
                    idx += 2
                else:
                    fused_mods.append(mods[idx])
                    idx += 1
            else:
                fused_mods.append(mods[idx])
                idx += 1
        return SparseSequential(*fused_mods)
|
class ToDense(SparseModule):
    """Convert a SparseConvTensor into a dense NCHW tensor."""

    def forward(self, x: SparseConvTensor):
        # Delegates to the sparse tensor's own densify method.
        return x.dense()
|
class RemoveGrid(SparseModule):
    """Drop the pre-allocated grid buffer so it can be freed."""

    def forward(self, x: SparseConvTensor):
        x.grid = None
        return x
|
def get_conv_output_size(input_size, kernel_size, stride, padding, dilation):
    """Compute the output spatial size of a (sparse) convolution.

    A kernel size of -1 marks a "global" dimension whose output extent is
    fixed to 1; all other dimensions follow the standard conv formula.
    """
    output_size = []
    for i in range(len(input_size)):
        if kernel_size[i] == -1:
            output_size.append(1)
        else:
            effective = dilation[i] * (kernel_size[i] - 1)
            size = (input_size[i] + 2 * padding[i] - effective - 1) \
                // stride[i] + 1
            output_size.append(size)
    return output_size
|
def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation,
                           output_padding):
    """Compute the output spatial size of a transposed (sparse) convolution.

    Raises:
        ValueError: if any kernel dimension is the global marker -1, which
            transposed convolution does not support.
    """
    output_size = []
    for i in range(len(input_size)):
        if kernel_size[i] == -1:
            raise ValueError("deconv don't support kernel_size < 0")
        size = ((input_size[i] - 1) * stride[i] - 2 * padding[i]
                + kernel_size[i] + output_padding[i])
        output_size.append(size)
    return output_size
|
def get_indice_pairs(indices, batch_size, spatial_shape, ksize=3, stride=1,
                     padding=0, dilation=1, out_padding=0, subm=False,
                     transpose=False, grid=None):
    """Build input/output index pairs for a sparse convolution.

    Scalar hyper-parameters are broadcast to one value per spatial
    dimension, then the 2d/3d/4d extension kernel is selected by the
    dimensionality of ``indices`` and by whether a pre-allocated ``grid``
    buffer is supplied.
    """
    # The first column of indices is the batch index.
    ndim = indices.shape[1] - 1

    def _per_dim(value):
        # Broadcast a scalar hyper-parameter across all spatial dims.
        return value if isinstance(value, (list, tuple)) else [value] * ndim

    ksize = _per_dim(ksize)
    stride = _per_dim(stride)
    padding = _per_dim(padding)
    dilation = _per_dim(dilation)
    out_padding = _per_dim(out_padding)
    # Simultaneous stride > 1 and dilation > 1 is unsupported.
    for (d, s) in zip(dilation, stride):
        assert any([(s == 1), (d == 1)]), "don't support this."
    if subm:
        # Submanifold convolution keeps the spatial shape unchanged.
        out_shape = spatial_shape
    elif transpose:
        out_shape = get_deconv_output_size(spatial_shape, ksize, stride,
                                           padding, dilation, out_padding)
    else:
        out_shape = get_conv_output_size(spatial_shape, ksize, stride,
                                         padding, dilation)
    if grid is None:
        if ndim == 2:
            pair_func = ext_module.get_indice_pairs_2d_forward
        elif ndim == 3:
            pair_func = ext_module.get_indice_pairs_3d_forward
        elif ndim == 4:
            pair_func = ext_module.get_indice_pairs_4d_forward
        else:
            raise NotImplementedError
        return pair_func(indices, batch_size, out_shape, spatial_shape,
                         ksize, stride, padding, dilation, out_padding,
                         int(subm), int(transpose))
    # Grid-backed variants (only implemented for 2d/3d).
    if ndim == 2:
        pair_func = ext_module.get_indice_pairs_2d_backward
    elif ndim == 3:
        pair_func = ext_module.get_indice_pairs_3d_backward
    else:
        raise NotImplementedError
    return pair_func(indices, grid, batch_size, out_shape, spatial_shape,
                     ksize, stride, padding, dilation, out_padding,
                     int(subm), int(transpose))
|
def indice_conv(features, filters, indice_pairs, indice_pair_num,
                num_activate_out, inverse=False, subm=False):
    """Sparse convolution forward via the C++/CUDA extension.

    Raises:
        NotImplementedError: if ``filters`` is neither float32 nor half.
    """
    if filters.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_conv_forward(features, filters, indice_pairs,
                                          indice_pair_num, num_activate_out,
                                          int(inverse), int(subm))
|
def fused_indice_conv(features, filters, bias, indice_pairs, indice_pair_num,
                      num_activate_out, inverse, subm):
    """Fused sparse convolution + bias forward via the C++/CUDA extension.

    Supported only for half-precision features or float32 filters.

    Raises:
        NotImplementedError: for any other dtype combination.
    """
    # Bug fix: the original tested ``filters.dtypes`` — a nonexistent
    # attribute — so unsupported dtypes raised AttributeError instead of
    # NotImplementedError.
    if (features.dtype == torch.half) or (filters.dtype == torch.float32):
        func = ext_module.fused_indice_conv_forward
    else:
        raise NotImplementedError
    return func(features, filters, bias, indice_pairs, indice_pair_num,
                num_activate_out, int(inverse), int(subm))
|
def indice_conv_backward(features, filters, out_bp, indice_pairs,
                         indice_pair_num, inverse=False, subm=False):
    """Sparse convolution backward via the C++/CUDA extension.

    Raises:
        NotImplementedError: if ``filters`` is neither float32 nor half.
    """
    if filters.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_conv_backward(features, filters, out_bp,
                                           indice_pairs, indice_pair_num,
                                           int(inverse), int(subm))
|
def indice_maxpool(features, indice_pairs, indice_pair_num,
                   num_activate_out):
    """Sparse max-pooling forward via the C++/CUDA extension.

    Raises:
        NotImplementedError: if ``features`` is neither float32 nor half.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_forward(features, indice_pairs,
                                             indice_pair_num,
                                             num_activate_out)
|
def indice_maxpool_backward(features, out_features, out_bp, indice_pairs,
                            indice_pair_num):
    """Sparse max-pooling backward via the C++/CUDA extension.

    Raises:
        NotImplementedError: if ``features`` is neither float32 nor half.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_backward(features, out_features, out_bp,
                                              indice_pairs, indice_pair_num)
|
class SparseMaxPool(SparseModule):
    """N-dimensional sparse max pooling over a SparseConvTensor."""

    def __init__(self, ndim, kernel_size, stride=1, padding=0, dilation=1,
                 subm=False):
        super().__init__()

        def _per_dim(value):
            # Broadcast a scalar hyper-parameter across all spatial dims.
            return value if isinstance(value, (list, tuple)) \
                else [value] * ndim

        self.ndim = ndim
        self.kernel_size = _per_dim(kernel_size)
        self.stride = _per_dim(stride)
        self.padding = _per_dim(padding)
        self.dilation = _per_dim(dilation)
        self.subm = subm

    def forward(self, input):
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        if self.subm:
            # Submanifold pooling keeps the spatial shape unchanged.
            out_spatial_shape = spatial_shape
        else:
            out_spatial_shape = get_conv_output_size(
                spatial_shape, self.kernel_size, self.stride, self.padding,
                self.dilation)
        outids, indice_pairs, indice_pairs_num = get_indice_pairs(
            indices, batch_size, spatial_shape, self.kernel_size,
            self.stride, self.padding, self.dilation, 0, self.subm)
        out_features = indice_maxpool(features, indice_pairs.to(device),
                                      indice_pairs_num.to(device),
                                      outids.shape[0])
        out_tensor = SparseConvTensor(out_features, outids,
                                      out_spatial_shape, batch_size)
        # Propagate cached indice pairs and the grid buffer.
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
|
class SparseMaxPool2d(SparseMaxPool):
    """2D sparse max pooling layer."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        super().__init__(2, kernel_size, stride, padding, dilation)
|
class SparseMaxPool3d(SparseMaxPool):
    """3D sparse max pooling layer."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        super().__init__(3, kernel_size, stride, padding, dilation)
|
class SyncBatchNormFunction(Function):
    """Autograd function backing :class:`SyncBatchNorm`.

    NOTE: the first parameter of ``forward``/``backward`` is the autograd
    context (named ``self`` here instead of the conventional ``ctx``).
    """

    @staticmethod
    def symbolic(g, input, running_mean, running_var, weight, bias, momentum, eps, group, group_size, stats_mode):
        # ONNX export: map onto the custom mmcv::MMCVSyncBatchNorm op.
        return g.op('mmcv::MMCVSyncBatchNorm', input, running_mean, running_var, weight, bias, momentum_f=momentum, eps_f=eps, group_i=group, group_size_i=group_size, stats_mode=stats_mode)

    @staticmethod
    def forward(self, input, running_mean, running_var, weight, bias, momentum, eps, group, group_size, stats_mode):
        # Stash hyper-parameters on the context for backward.
        self.momentum = momentum
        self.eps = eps
        self.group = group
        self.group_size = group_size
        self.stats_mode = stats_mode
        assert isinstance(input, (torch.HalfTensor, torch.FloatTensor, torch.cuda.HalfTensor, torch.cuda.FloatTensor)), f'only support Half or Float Tensor, but {input.type()}'
        output = torch.zeros_like(input)
        # Collapse trailing spatial dims: (N, C, ...) -> (N, C, L).
        input3d = input.flatten(start_dim=2)
        output3d = output.view_as(input3d)
        num_channels = input3d.size(1)
        # Buffers filled in-place by the extension kernels.
        mean = torch.zeros(num_channels, dtype=torch.float, device=input3d.device)
        var = torch.zeros(num_channels, dtype=torch.float, device=input3d.device)
        norm = torch.zeros_like(input3d, dtype=torch.float, device=input3d.device)
        std = torch.zeros(num_channels, dtype=torch.float, device=input3d.device)
        batch_size = input3d.size(0)
        if (batch_size > 0):
            ext_module.sync_bn_forward_mean(input3d, mean)
            batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype)
        else:
            # Empty batch on this worker: contribute zeros and flag it.
            batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype)
        # Pack mean and the batch flag so a single all_reduce syncs both.
        vec = torch.cat([mean, batch_flag])
        if (self.stats_mode == 'N'):
            # Weight this worker's contribution by its batch size.
            vec *= batch_size
        if (self.group_size > 1):
            dist.all_reduce(vec, group=self.group)
        total_batch = vec[(- 1)].detach()
        mean = vec[:num_channels]
        if (self.stats_mode == 'default'):
            mean = (mean / self.group_size)
        elif (self.stats_mode == 'N'):
            mean = (mean / total_batch.clamp(min=1))
        else:
            raise NotImplementedError
        if (batch_size > 0):
            ext_module.sync_bn_forward_var(input3d, mean, var)
        if (self.stats_mode == 'N'):
            var *= batch_size
        if (self.group_size > 1):
            dist.all_reduce(var, group=self.group)
        if (self.stats_mode == 'default'):
            var /= self.group_size
        elif (self.stats_mode == 'N'):
            var /= total_batch.clamp(min=1)
        else:
            raise NotImplementedError
        # If every worker saw an empty batch, zero the momentum so the
        # running statistics stay untouched.
        update_flag = total_batch.clamp(max=1)
        momentum = (update_flag * self.momentum)
        ext_module.sync_bn_forward_output(input3d, mean, var, weight, bias, running_mean, running_var, norm, std, output3d, eps=self.eps, momentum=momentum, group_size=self.group_size)
        self.save_for_backward(norm, std, weight)
        return output

    @staticmethod
    @once_differentiable
    def backward(self, grad_output):
        (norm, std, weight) = self.saved_tensors
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(weight)
        grad_input = torch.zeros_like(grad_output)
        grad_output3d = grad_output.flatten(start_dim=2)
        grad_input3d = grad_input.view_as(grad_output3d)
        batch_size = grad_input3d.size(0)
        if (batch_size > 0):
            ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight, grad_bias)
        # Average the affine-parameter gradients across the process group.
        if (self.group_size > 1):
            dist.all_reduce(grad_weight, group=self.group)
            dist.all_reduce(grad_bias, group=self.group)
            grad_weight /= self.group_size
            grad_bias /= self.group_size
        if (batch_size > 0):
            ext_module.sync_bn_backward_data(grad_output3d, weight, grad_weight, grad_bias, norm, std, grad_input3d)
        # One gradient slot per forward argument.
        return (grad_input, None, None, grad_weight, grad_bias, None, None, None, None, None)
|
@NORM_LAYERS.register_module(name='MMSyncBN')
class SyncBatchNorm(Module):
    """Synchronized Batch Normalization.

    Batch statistics are synchronized across the workers of ``group``
    during training.

    Args:
        num_features (int): Number of features/channels in the input.
        eps (float, optional): Value added to the denominator for numerical
            stability. Defaults to 1e-5.
        momentum (float, optional): Value used for the running_mean and
            running_var computation. Defaults to 0.1.
        affine (bool, optional): Whether to use learnable affine
            parameters. Defaults to True.
        track_running_stats (bool, optional): Whether to track running
            mean and variance during training. When False, the running
            buffers are ``None`` and batch statistics are always used, in
            both training and eval modes. Defaults to True.
        group (int, optional): Statistics are synchronized within each
            process group individually; defaults to the whole world.
        stats_mode (str, optional): ``'default'`` averages per-worker
            statistics with equal weight (inaccurate if some workers get
            empty batches); ``'N'`` weights each worker by its real batch
            size, which is robust to empty batches. Defaults to 'default'.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, group=None, stats_mode='default'):
        super(SyncBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # Default to synchronizing across the entire world.
        group = (dist.group.WORLD if (group is None) else group)
        self.group = group
        self.group_size = dist.get_world_size(group)
        assert (stats_mode in ['default', 'N']), f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
        self.stats_mode = stats_mode
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            # No tracking: buffers stay None and batch stats are used.
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)
            self.register_buffer('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            # Matches classic (pre-1.0) PyTorch BN init: uniform weight.
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def forward(self, input):
        if (input.dim() < 2):
            raise ValueError(f'expected at least 2D input, got {input.dim()}D input')
        if (self.momentum is None):
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if (self.training and self.track_running_stats):
            if (self.num_batches_tracked is not None):
                self.num_batches_tracked += 1
                if (self.momentum is None):
                    # Cumulative moving average when momentum is None.
                    exponential_average_factor = (1.0 / float(self.num_batches_tracked))
                else:
                    exponential_average_factor = self.momentum
        if (self.training or (not self.track_running_stats)):
            # Synchronized path with batch statistics.
            return SyncBatchNormFunction.apply(input, self.running_mean, self.running_var, self.weight, self.bias, exponential_average_factor, self.eps, self.group, self.group_size, self.stats_mode)
        else:
            # Eval with tracked stats: plain (local) batch norm.
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, exponential_average_factor, self.eps)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'({self.num_features}, '
        s += f'eps={self.eps}, '
        s += f'momentum={self.momentum}, '
        s += f'affine={self.affine}, '
        s += f'track_running_stats={self.track_running_stats}, '
        # NOTE(review): missing space after the comma below, so the repr
        # reads "group_size=X,stats_mode=..." — cosmetic only.
        s += f'group_size={self.group_size},'
        s += f'stats_mode={self.stats_mode})'
        return s
|
class ThreeInterpolate(Function):
    """Weighted linear interpolation over three neighbouring features.

    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
    for more details.
    """

    @staticmethod
    def forward(ctx, features: torch.Tensor, indices: torch.Tensor,
                weight: torch.Tensor) -> torch.Tensor:
        """Interpolate target features from weighted source features.

        Args:
            features (torch.Tensor): (B, C, M) feature descriptors to be
                interpolated.
            indices (torch.Tensor): (B, n, 3) indices of the three nearest
                neighbour features for each target point.
            weight (torch.Tensor): (B, n, 3) interpolation weights.

        Returns:
            torch.Tensor: (B, C, N) interpolated features.
        """
        for tensor in (features, indices, weight):
            assert tensor.is_contiguous()
        batch, channels, num_source = features.size()
        num_target = indices.size(1)
        # Save the source count so backward can size its gradient buffer.
        ctx.three_interpolate_for_backward = (indices, weight, num_source)
        output = torch.cuda.FloatTensor(batch, channels, num_target)
        ext_module.three_interpolate_forward(
            features, indices, weight, output,
            b=batch, c=channels, m=num_source, n=num_target)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """Scatter output gradients back onto the source features.

        Args:
            grad_out (torch.Tensor): (B, C, N) gradients of the outputs.

        Returns:
            torch.Tensor: (B, C, M) gradients of the source features; the
            index and weight inputs receive no gradient.
        """
        indices, weight, num_source = ctx.three_interpolate_for_backward
        batch, channels, num_target = grad_out.size()
        grad_features = torch.cuda.FloatTensor(batch, channels,
                                               num_source).zero_()
        grad_out_data = grad_out.data.contiguous()
        ext_module.three_interpolate_backward(
            grad_out_data, indices, weight, grad_features.data,
            b=batch, c=channels, n=num_target, m=num_source)
        return grad_features, None, None
|
class ThreeNN(Function):
    """Find the top-3 nearest neighbours of the target set in the source set.

    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
    for more details.
    """

    @staticmethod
    def forward(ctx, target: torch.Tensor, source: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Search the three nearest source points for each target point.

        Args:
            target (torch.Tensor): (B, N, 3) points whose neighbours are
                searched for.
            source (torch.Tensor): (B, M, 3) candidate neighbour points.

        Returns:
            tuple: (B, N, 3) L2 distances and (B, N, 3) integer indices of
            the three nearest source points per target point.
        """
        target = target.contiguous()
        source = source.contiguous()
        batch, num_target, _ = target.size()
        num_source = source.size(1)
        # Kernel writes squared distances; square-rooted before returning.
        dist2 = torch.cuda.FloatTensor(batch, num_target, 3)
        idx = torch.cuda.IntTensor(batch, num_target, 3)
        ext_module.three_nn_forward(target, source, dist2, idx,
                                    b=batch, n=num_target, m=num_source)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(idx)
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Neither output propagates gradients to the inputs.
        return None, None
|
class TINShiftFunction(Function):
    """Autograd wrapper for the temporal-interlace-shift kernels."""

    @staticmethod
    def forward(ctx, input, shift):
        """Shift channel groups of ``input`` along the temporal axis."""
        C = input.size(2)
        num_segments = shift.size(1)
        # Channels must split evenly into ``num_segments`` groups.
        if (C % num_segments) != 0 or (C // num_segments) <= 0:
            raise ValueError(f'C should be a multiple of num_segments, but got C={C} and num_segments={num_segments}.')
        ctx.save_for_backward(shift)
        out = torch.zeros_like(input)
        ext_module.tin_shift_forward(input, shift, out)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        (shift,) = ctx.saved_tensors
        data_grad_input = torch.zeros_like(grad_output)
        # The integer shift input receives an all-zero gradient.
        shift_grad_input = torch.zeros_like(shift)
        ext_module.tin_shift_backward(grad_output, shift, data_grad_input)
        return data_grad_input, shift_grad_input
|
class TINShift(nn.Module):
    """Temporal Interlace Shift.

    A differentiable temporal-wise frame shifting proposed in
    "Temporal Interlacing Network".

    Please refer to `Temporal Interlacing Network
    <https://arxiv.org/abs/2001.06499>`_ for more details.

    Code is modified from https://github.com/mit-han-lab/temporal-shift-module
    """

    def forward(self, input, shift):
        """Perform temporal interlace shift.

        Args:
            input (torch.Tensor): Feature map with shape
                [N, num_segments, C, H * W].
            shift (torch.Tensor): Shift tensor with shape [N, num_segments].

        Returns:
            Feature map after temporal interlace shift.
        """
        return tin_shift(input, shift)
|
class _Voxelization(Function):

    @staticmethod
    def forward(ctx, points, voxel_size, coors_range, max_points=35, max_voxels=20000, deterministic=True):
        """Convert kitti points(N, >=3) to voxels.

        Args:
            points (torch.Tensor): [N, ndim]. points[:, :3] contain xyz
                coordinates; points[:, 3:] contain other information such
                as reflectivity.
            voxel_size (tuple or float): Size of a single voxel, shape [3].
            coors_range (tuple or float): Coordinate range of the voxel
                grid, shape [6].
            max_points (int, optional): Maximum points per voxel; -1 means
                dynamic voxelization (no per-voxel cap). Default: 35.
            max_voxels (int, optional): Maximum number of voxels created;
                points may be dropped once exceeded, so shuffle points
                first. Default: 20000.
            deterministic (bool, optional): Whether to use the
                deterministic hard-voxelization kernel; the
                non-deterministic one is considerably faster. See
                https://github.com/open-mmlab/mmdetection3d/issues/894 and
                https://github.com/open-mmlab/mmdetection3d/pull/904.
                Default: True.

        Returns:
            tuple[torch.Tensor] | torch.Tensor: For hard voxelization, a
            tuple of (voxels [M, max_points, ndim], coordinates [M, 3],
            points-per-voxel [M]); for dynamic voxelization, only the
            per-point coordinates.
        """
        # Dynamic voxelization: only per-point voxel coordinates are computed.
        if ((max_points == (- 1)) or (max_voxels == (- 1))):
            coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int)
            ext_module.dynamic_voxelize_forward(points, torch.tensor(voxel_size, dtype=torch.float), torch.tensor(coors_range, dtype=torch.float), coors, NDim=3)
            return coors
        else:
            # Pre-allocate maximal buffers; the kernel fills them in-place.
            voxels = points.new_zeros(size=(max_voxels, max_points, points.size(1)))
            coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int)
            num_points_per_voxel = points.new_zeros(size=(max_voxels,), dtype=torch.int)
            # CPU scalar written by the kernel with the actual voxel count.
            voxel_num = torch.zeros(size=(), dtype=torch.long)
            ext_module.hard_voxelize_forward(points, torch.tensor(voxel_size, dtype=torch.float), torch.tensor(coors_range, dtype=torch.float), voxels, coors, num_points_per_voxel, voxel_num, max_points=max_points, max_voxels=max_voxels, NDim=3, deterministic=deterministic)
            # Truncate the buffers to the number of voxels actually filled.
            voxels_out = voxels[:voxel_num]
            coors_out = coors[:voxel_num]
            num_points_per_voxel_out = num_points_per_voxel[:voxel_num]
            return (voxels_out, coors_out, num_points_per_voxel_out)
|
class Voxelization(nn.Module):
    """Convert kitti points(N, >=3) to voxels.

    Please refer to `Point-Voxel CNN for Efficient 3D Deep Learning
    <https://arxiv.org/abs/1907.03739>`_ for more details.

    Args:
        voxel_size (tuple or float): The size of voxel with the shape of [3].
        point_cloud_range (tuple or float): The coordinate range of voxel with
            the shape of [6] ([x_min, y_min, z_min, x_max, y_max, z_max]).
        max_num_points (int): maximum points contained in a voxel. if
            max_points=-1, it means using dynamic_voxelize.
        max_voxels (int or tuple, optional): maximum voxels created. Users
            should shuffle points before calling because max_voxels may drop
            points. A tuple gives separate (training, testing) limits.
            Default: 20000.
        deterministic (bool): whether to invoke the deterministic version of
            hard-voxelization. The non-deterministic version is considerably
            faster but not deterministic; only affects hard voxelization.
            Default: True.
    """

    def __init__(self, voxel_size, point_cloud_range, max_num_points,
                 max_voxels=20000, deterministic=True):
        super().__init__()
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.max_num_points = max_num_points
        # Normalize to a (train, test) pair so ``forward`` can pick by mode.
        self.max_voxels = (max_voxels if isinstance(max_voxels, tuple)
                           else _pair(max_voxels))
        self.deterministic = deterministic
        pc_range = torch.tensor(point_cloud_range, dtype=torch.float32)
        vox_size = torch.tensor(voxel_size, dtype=torch.float32)
        # Number of voxels along each axis, rounded to the nearest integer.
        grid_size = torch.round((pc_range[3:] - pc_range[:3]) / vox_size).long()
        self.grid_size = grid_size
        # First two grid dims plus a trailing 1, then reversed: [1, g1, g0].
        self.pcd_shape = [*grid_size[:2], 1][::-1]

    def forward(self, input):
        # Training and evaluation may cap the voxel count differently.
        max_voxels = self.max_voxels[0] if self.training else self.max_voxels[1]
        return voxelization(input, self.voxel_size, self.point_cloud_range,
                            self.max_num_points, max_voxels,
                            self.deterministic)

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'voxel_size={self.voxel_size}'
                f', point_cloud_range={self.point_cloud_range}'
                f', max_num_points={self.max_num_points}'
                f', max_voxels={self.max_voxels}'
                f', deterministic={self.deterministic})')
|
def scatter(input, devices, streams=None):
    """Scatters tensor across multiple GPUs."""
    if streams is None:
        streams = [None] * len(devices)
    if isinstance(input, list):
        # Split the list into per-device chunks and recurse per item.
        chunk_size = (len(input) - 1) // len(devices) + 1
        return [
            scatter(item, [devices[i // chunk_size]],
                    [streams[i // chunk_size]])
            for i, item in enumerate(input)
        ]
    if isinstance(input, torch.Tensor):
        output = input.contiguous()
        # A copy stream is only meaningful when there is data to copy.
        stream = streams[0] if output.numel() > 0 else None
        if devices != [-1]:
            # Device -1 is the CPU sentinel; otherwise copy asynchronously
            # on the provided stream.
            with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
                output = output.cuda(devices[0], non_blocking=True)
        return output
    raise Exception(f'Unknown type {type(input)}.')
|
def synchronize_stream(output, devices, streams):
    """Make each device's current stream wait for its copy stream."""
    if isinstance(output, list):
        # Each device owns one contiguous chunk of the output list.
        chunk_size = len(output) // len(devices)
        for dev_idx, device in enumerate(devices):
            start = dev_idx * chunk_size
            for item in output[start:start + chunk_size]:
                synchronize_stream(item, [device], [streams[dev_idx]])
    elif isinstance(output, torch.Tensor):
        # Empty tensors carry no data, so nothing needs synchronizing.
        if output.numel() != 0:
            with torch.cuda.device(devices[0]):
                main_stream = torch.cuda.current_stream()
                main_stream.wait_stream(streams[0])
                # Keep the tensor alive until work queued on main_stream
                # has consumed it.
                output.record_stream(main_stream)
    else:
        raise Exception(f'Unknown type {type(output)}.')
|
def get_input_device(input):
    """Return the CUDA device id of the first GPU tensor found, else -1."""
    if isinstance(input, torch.Tensor):
        return input.get_device() if input.is_cuda else -1
    if isinstance(input, list):
        # Recurse; the first GPU-resident tensor decides the device.
        for item in input:
            device = get_input_device(item)
            if device != -1:
                return device
        return -1
    raise Exception(f'Unknown type {type(input)}.')
|
class Scatter:
    """Scatter helper that copies inputs to target GPUs (or keeps CPU data)."""

    @staticmethod
    def forward(target_gpus, input):
        input_device = get_input_device(input)
        streams = None
        # Copy streams are only needed when moving CPU-resident data onto
        # real GPUs (-1 is the CPU sentinel).
        if input_device == -1 and target_gpus != [-1]:
            streams = [_get_stream(device) for device in target_gpus]
        outputs = scatter(input, target_gpus, streams)
        if streams is not None:
            # Ensure the async copies finish before the outputs are used.
            synchronize_stream(outputs, target_gpus, streams)
        if isinstance(outputs, list):
            return tuple(outputs)
        return (outputs,)
|
def collate(batch, samples_per_gpu=1):
    """Put each data field into a tensor/DataContainer with outer dimension
    batch size.

    Extend default_collate to add support for
    :type:`~mmcv.parallel.DataContainer`. There are 3 cases.

    1. cpu_only = True, e.g., meta data
    2. cpu_only = False, stack = True, e.g., images tensors
    3. cpu_only = False, stack = False, e.g., gt bboxes

    Args:
        batch (Sequence): samples making up one batch.
        samples_per_gpu (int): number of consecutive samples grouped for a
            single GPU. Default: 1.

    Raises:
        TypeError: If ``batch`` is not a Sequence.
        ValueError: If a stacked DataContainer has an invalid ``pad_dims``.
    """
    if not isinstance(batch, Sequence):
        # Bugfix: the original message interpolated ``batch.dtype``, which
        # itself raises AttributeError for inputs without a ``dtype``
        # attribute (e.g. dict, int, None) and masks the intended TypeError.
        raise TypeError(f'{type(batch)} is not supported.')
    if isinstance(batch[0], DataContainer):
        stacked = []
        if batch[0].cpu_only:
            # Case 1: keep data on CPU, just group it per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append(
                    [sample.data for sample in batch[i:(i + samples_per_gpu)]])
            return DataContainer(
                stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
        elif batch[0].stack:
            # Case 2: pad the trailing ``pad_dims`` dimensions of each group
            # to a common shape, then stack each group into one tensor.
            for i in range(0, len(batch), samples_per_gpu):
                assert isinstance(batch[i].data, torch.Tensor)
                if batch[i].pad_dims is not None:
                    ndim = batch[i].dim()
                    assert ndim > batch[i].pad_dims
                    # Compute the per-dim maximum over the group.
                    max_shape = [0 for _ in range(batch[i].pad_dims)]
                    for dim in range(1, batch[i].pad_dims + 1):
                        max_shape[dim - 1] = batch[i].size(-dim)
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # Leading (non-padded) dims must already agree.
                        for dim in range(0, ndim - batch[i].pad_dims):
                            assert batch[i].size(dim) == sample.size(dim)
                        for dim in range(1, batch[i].pad_dims + 1):
                            max_shape[dim - 1] = max(max_shape[dim - 1],
                                                     sample.size(-dim))
                    padded_samples = []
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # F.pad takes pairs per (reversed) dim; only the
                        # trailing side of each padded dim is extended.
                        pad = [0 for _ in range(batch[i].pad_dims * 2)]
                        for dim in range(1, batch[i].pad_dims + 1):
                            pad[2 * dim - 1] = \
                                max_shape[dim - 1] - sample.size(-dim)
                        padded_samples.append(
                            F.pad(sample.data, pad,
                                  value=sample.padding_value))
                    stacked.append(default_collate(padded_samples))
                elif batch[i].pad_dims is None:
                    stacked.append(
                        default_collate([
                            sample.data
                            for sample in batch[i:(i + samples_per_gpu)]
                        ]))
                else:
                    raise ValueError(
                        'pad_dims should be either None or integers (1-3)')
        else:
            # Case 3: no stacking; keep raw per-sample data grouped per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append(
                    [sample.data for sample in batch[i:(i + samples_per_gpu)]])
        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
    elif isinstance(batch[0], Sequence):
        transposed = zip(*batch)
        return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], Mapping):
        return {
            key: collate([d[key] for d in batch], samples_per_gpu)
            for key in batch[0]
        }
    else:
        return default_collate(batch)
|
def assert_tensor_type(func):
    """Decorator restricting a DataContainer method to tensor payloads."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        holder = args[0]
        if isinstance(holder.data, torch.Tensor):
            return func(*args, **kwargs)
        raise AttributeError(
            f'{holder.__class__.__name__} has no attribute '
            f'{func.__name__} for type {holder.datatype}')
    return wrapper
|
class DataContainer:
    """A container for any type of objects.

    Typically tensors will be stacked in the collate function and sliced along
    some dimension in the scatter function. This behavior has some limitations.
    1. All tensors have to be the same size.
    2. Types are limited (numpy array or Tensor).

    We design `DataContainer` and `MMDataParallel` to overcome these
    limitations. The behavior can be either of the following.

    - copy to GPU, pad all tensors to the same size and stack them
    - copy to GPU without stacking
    - leave the objects as is and pass it to the model
    - pad_dims specifies the number of last few dimensions to do padding
    """

    def __init__(self, data, stack=False, padding_value=0, cpu_only=False,
                 pad_dims=2):
        # pad_dims may only be None (no padding) or the number of trailing
        # dimensions (1-3) that collate is allowed to pad.
        assert pad_dims in (None, 1, 2, 3)
        self._data = data
        self._cpu_only = cpu_only
        self._stack = stack
        self._padding_value = padding_value
        self._pad_dims = pad_dims

    def __repr__(self):
        return f'{type(self).__name__}({self.data!r})'

    def __len__(self):
        return len(self._data)

    @property
    def data(self):
        # The wrapped payload, returned untouched.
        return self._data

    @property
    def datatype(self):
        # Tensors report their full torch type string; anything else
        # reports its Python type.
        if isinstance(self.data, torch.Tensor):
            return self.data.type()
        return type(self.data)

    @property
    def cpu_only(self):
        return self._cpu_only

    @property
    def stack(self):
        return self._stack

    @property
    def padding_value(self):
        return self._padding_value

    @property
    def pad_dims(self):
        return self._pad_dims

    @assert_tensor_type
    def size(self, *args, **kwargs):
        return self.data.size(*args, **kwargs)

    @assert_tensor_type
    def dim(self):
        return self.data.dim()
|
class MMDataParallel(DataParallel):
    """The DataParallel module that supports DataContainer.

    MMDataParallel has two main differences with PyTorch DataParallel:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data during both GPU and CPU inference.
    - It implement two more APIs ``train_step()`` and ``val_step()``.

    .. warning::
        MMDataParallel only supports single GPU training, if you need to
        train with multiple GPUs, please use MMDistributedDataParallel
        instead. If you have multiple GPUs and you just want to use
        MMDataParallel, you can set the environment variable
        ``CUDA_VISIBLE_DEVICES=0`` or instantiate ``MMDataParallel`` with
        ``device_ids=[0]``.

    Args:
        module (:class:`nn.Module`): Module to be encapsulated.
        device_ids (list[int]): Device IDS of modules to be scattered to.
            Defaults to None when GPU is not available.
        output_device (str | int): Device ID for output. Defaults to None.
        dim (int): Dimension used to scatter the data. Defaults to 0.
    """

    def __init__(self, *args, dim=0, **kwargs):
        super().__init__(*args, dim=dim, **kwargs)
        self.dim = dim

    def forward(self, *inputs, **kwargs):
        """Override the original forward function.

        The main difference lies in the CPU inference where the data in
        :class:`DataContainers` will still be gathered.
        """
        if not self.device_ids:
            # CPU inference: scatter with the sentinel device -1 so
            # DataContainer payloads are unwrapped without a GPU copy.
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return self.module(*inputs[0], **kwargs[0])
        return super().forward(*inputs, **kwargs)

    def scatter(self, inputs, kwargs, device_ids):
        # DataContainer-aware replacement for DataParallel.scatter.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def train_step(self, *inputs, **kwargs):
        """Run ``self.module.train_step`` with scattered inputs."""
        return self._run_step('train_step', inputs, kwargs)

    def val_step(self, *inputs, **kwargs):
        """Run ``self.module.val_step`` with scattered inputs."""
        return self._run_step('val_step', inputs, kwargs)

    def _run_step(self, step_name, inputs, kwargs):
        """Shared implementation of ``train_step``/``val_step``.

        The two public methods were copy-paste duplicates except for the
        delegated method name, so the common body lives here.

        Args:
            step_name (str): Method name to call on ``self.module``.
            inputs (tuple): Positional arguments for the step.
            kwargs (dict): Keyword arguments for the step.

        Returns:
            The return value of ``self.module.<step_name>``.

        Raises:
            RuntimeError: If the wrapped module's parameters or buffers are
                not all on ``device_ids[0]``.
        """
        step = getattr(self.module, step_name)
        if not self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return step(*inputs[0], **kwargs[0])
        assert len(self.device_ids) == 1, (
            'MMDataParallel only supports single GPU training, if you need '
            'to train with multiple GPUs, please use '
            'MMDistributedDataParallel instead.')
        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError(
                    'module must have its parameters and buffers on device '
                    f'{self.src_device_obj} (device_ids[0]) but found one of '
                    f'them on device: {t.device}')
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        return step(*inputs[0], **kwargs[0])
|
class MMDistributedDataParallel(DistributedDataParallel):
    """The DDP module that supports DataContainer.

    MMDDP has two main differences with PyTorch DDP:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data.
    - It implement two APIs ``train_step()`` and ``val_step()``.
    """
    def to_kwargs(self, inputs, kwargs, device_id):
        """Move ``(inputs, kwargs)`` onto a single device with
        DataContainer support (used by DDP's forward machinery)."""
        return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
    def scatter(self, inputs, kwargs, device_ids):
        """DataContainer-aware replacement for DDP's scatter."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    def train_step(self, *inputs, **kwargs):
        """train_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.train_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # PyTorch >= 1.7 may rebuild gradient buckets on the first
        # iteration; log when it happens. parrots lacks this private API,
        # hence the version guard.
        if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
            print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
        if getattr(self, 'require_forward_param_sync', True):
            # Sync parameters/buffers across processes before running.
            self._sync_params()
        if self.device_ids:
            (inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
            if (len(self.device_ids) == 1):
                output = self.module.train_step(*inputs[0], **kwargs[0])
            else:
                # Legacy multi-device-per-process path: run replicas in
                # parallel, then gather outputs onto the output device.
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # No device ids: run the wrapped module directly.
            output = self.module.train_step(*inputs, **kwargs)
        if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', True)):
            # Tell the reducer which tensors take part in the coming
            # backward pass so gradient reduction hooks are installed.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
            self.require_forward_param_sync = False
        return output
    def val_step(self, *inputs, **kwargs):
        """val_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.val_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # Mirrors train_step() above, delegating to ``val_step`` instead.
        if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
            print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()
        if self.device_ids:
            (inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
            if (len(self.device_ids) == 1):
                output = self.module.val_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = self.module.val_step(*inputs, **kwargs)
        if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', True)):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
            self.require_forward_param_sync = False
        return output
|
@MODULE_WRAPPERS.register_module()
class MMDistributedDataParallel(nn.Module):
    """Deprecated, self-contained distributed wrapper.

    Broadcasts the wrapped module's state from rank 0 at construction time
    and scatters inputs onto the current CUDA device for every call.
    NOTE(review): unlike torch's DistributedDataParallel, no gradient
    reduction is performed here — presumably the training loop handles
    that; verify against the runner before relying on it.
    """
    def __init__(self, module, dim=0, broadcast_buffers=True, bucket_cap_mb=25):
        # module (nn.Module): the model to wrap.
        # dim (int): dimension along which inputs are scattered.
        # broadcast_buffers (bool): also broadcast registered buffers.
        # bucket_cap_mb (int): coalescing bucket size in MiB.
        super(MMDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers
        self.broadcast_bucket_size = ((bucket_cap_mb * 1024) * 1024)
        # Make every rank start from rank 0's parameters/buffers.
        self._sync_params()
    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        # Broadcast from rank 0 in flattened chunks of at most
        # ``buffer_size`` bytes, then copy synced values back in place.
        # NOTE: the loop variable deliberately shadows ``tensors``; each
        # iteration rebinds it to one chunk yielded by _take_tensors.
        for tensors in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(tensors)
            dist.broadcast(flat_tensors, 0)
            for (tensor, synced) in zip(tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
                tensor.copy_(synced)
    def _sync_params(self):
        """Broadcast the module's state dict (and optionally its buffers)
        from rank 0 to all other ranks."""
        module_states = list(self.module.state_dict().values())
        if (len(module_states) > 0):
            self._dist_broadcast_coalesced(module_states, self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # torch < 1.0 only exposes buffers via the private
            # ``_all_buffers()``; newer versions have ``buffers()``.
            if ((TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) < digit_version('1.0'))):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if (len(buffers) > 0):
                self._dist_broadcast_coalesced(buffers, self.broadcast_bucket_size)
    def scatter(self, inputs, kwargs, device_ids):
        """DataContainer-aware scatter."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    def forward(self, *inputs, **kwargs):
        # Scatter onto this process's current CUDA device, then run.
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])
    def train_step(self, *inputs, **kwargs):
        # Same scattering as forward(), delegating to module.train_step.
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        output = self.module.train_step(*inputs[0], **kwargs[0])
        return output
    def val_step(self, *inputs, **kwargs):
        # Same scattering as forward(), delegating to module.val_step.
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        output = self.module.val_step(*inputs[0], **kwargs[0])
        return output
|
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if target_gpus == [-1]:
                # CPU path: use the custom Scatter (no autograd split).
                return Scatter.forward(target_gpus, obj)
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            # cpu_only payloads are passed through untouched.
            return obj.data if obj.cpu_only else Scatter.forward(
                target_gpus, obj.data)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return [list(group) for group in zip(*map(scatter_map, obj))]
        if isinstance(obj, dict) and len(obj) > 0:
            return [type(obj)(fields)
                    for fields in zip(*map(scatter_map, obj.items()))]
        # Anything else is replicated as-is, once per target GPU.
        return [obj for _ in target_gpus]

    try:
        return scatter_map(inputs)
    finally:
        # Break the closure's self-reference so it cannot linger via a
        # reference cycle after scatter() returns.
        scatter_map = None
|
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter with support for kwargs dictionary."""
    inputs = scatter(inputs, target_gpus, dim) if inputs else []
    kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter side so every target gets a matching (args, kwargs)
    # pair.
    length_gap = len(kwargs) - len(inputs)
    if length_gap > 0:
        inputs.extend(() for _ in range(length_gap))
    elif length_gap < 0:
        kwargs.extend({} for _ in range(-length_gap))
    return tuple(inputs), tuple(kwargs)
|
def is_module_wrapper(module):
    """Check if a module is a module wrapper.

    The following 3 modules in MMCV (and their subclasses) are regarded as
    module wrappers: DataParallel, DistributedDataParallel,
    MMDistributedDataParallel (the deprecated version). You may add your own
    module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: True if the input module is a module wrapper.
    """
    # Every registered wrapper class counts, including subclasses.
    registered_wrappers = tuple(MODULE_WRAPPERS.module_dict.values())
    return isinstance(module, registered_wrappers)
|
class BaseModule(nn.Module, metaclass=ABCMeta):
    """Base module for all modules in openmmlab.

    ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
    functionality of parameter initialization. Compared with
    ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.

    - ``init_cfg``: the config to control the initialization.
    - ``init_weights``: The function of parameter initialization and recording
      initialization information.
    - ``_params_init_info``: Used to track the parameter initialization
      information. This attribute only exists during executing the
      ``init_weights``.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """
    def __init__(self, init_cfg=None):
        """Initialize BaseModule, inherited from `torch.nn.Module`"""
        super(BaseModule, self).__init__()
        # Set to True once init_weights() has run for this module.
        self._is_init = False
        # Deep-copy so a caller mutating its config dict afterwards cannot
        # change this module's initialization behavior.
        self.init_cfg = copy.deepcopy(init_cfg)
    @property
    def is_init(self):
        # Whether init_weights() has already been executed.
        return self._is_init
    def init_weights(self):
        """Initialize the weights."""
        is_top_level_module = False
        # Only the outermost module of a recursive init_weights() call
        # creates the shared tracking dict; submodules reuse it.
        if (not hasattr(self, '_params_init_info')):
            self._params_init_info = defaultdict(dict)
            is_top_level_module = True
            for (name, param) in self.named_parameters():
                self._params_init_info[param]['init_info'] = f'The value is the same before and after calling `init_weights` of {self.__class__.__name__} '
                # Snapshot of the parameter mean, used to detect whether a
                # later initializer actually changed the values.
                self._params_init_info[param]['tmp_mean_value'] = param.data.mean()
            # Share the tracking dict with every submodule.
            for sub_module in self.modules():
                sub_module._params_init_info = self._params_init_info
        # Log to the first initialized logger if any, else to 'mmcv'.
        logger_names = list(logger_initialized.keys())
        logger_name = (logger_names[0] if logger_names else 'mmcv')
        # Imported lazily to avoid a circular import with ..cnn.
        from ..cnn import initialize
        from ..cnn.utils.weight_init import update_init_info
        module_name = self.__class__.__name__
        if (not self._is_init):
            if self.init_cfg:
                print_log(f'initialize {module_name} with init_cfg {self.init_cfg}', logger=logger_name)
                initialize(self, self.init_cfg)
                if isinstance(self.init_cfg, dict):
                    # Pretrained weights take precedence over children's own
                    # init: return early (note that _is_init stays False in
                    # this branch).
                    if (self.init_cfg['type'] == 'Pretrained'):
                        return
            for m in self.children():
                if hasattr(m, 'init_weights'):
                    m.init_weights()
                    # Record that the child ran a user-defined init.
                    update_init_info(m, init_info=f'Initialized by user-defined `init_weights` in {m.__class__.__name__} ')
            self._is_init = True
        else:
            warnings.warn(f'init_weights of {self.__class__.__name__} has been called more than once.')
        if is_top_level_module:
            # Dump the collected info, then remove the tracking dict from
            # all submodules so it only exists during init_weights().
            self._dump_init_info(logger_name)
            for sub_module in self.modules():
                del sub_module._params_init_info
    @master_only
    def _dump_init_info(self, logger_name):
        """Dump the initialization information to a file named
        `initialization.log.json` in workdir.

        Args:
            logger_name (str): The name of logger.
        """
        logger = get_logger(logger_name)
        with_file_handler = False
        # Prefer writing directly to an attached FileHandler's stream.
        for handler in logger.handlers:
            if isinstance(handler, FileHandler):
                handler.stream.write('Name of parameter - Initialization information\n')
                for (name, param) in self.named_parameters():
                    handler.stream.write(f'''
{name} - {param.shape}: 
{self._params_init_info[param]['init_info']} 
''')
                handler.stream.flush()
                with_file_handler = True
        if (not with_file_handler):
            # No file handler attached: fall back to regular logging.
            for (name, param) in self.named_parameters():
                print_log(f'''
{name} - {param.shape}: 
{self._params_init_info[param]['init_info']} 
''', logger=logger_name)
    def __repr__(self):
        s = super().__repr__()
        if self.init_cfg:
            s += f'''
init_cfg={self.init_cfg}'''
        return s
|
class Sequential(BaseModule, nn.Sequential):
    """Sequential module in openmmlab.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """
    def __init__(self, *args, init_cfg=None):
        # Call both bases explicitly rather than via cooperative super():
        # BaseModule takes only init_cfg, while nn.Sequential consumes the
        # positional child modules.
        BaseModule.__init__(self, init_cfg)
        nn.Sequential.__init__(self, *args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.