code stringlengths 17 6.64M |
|---|
class CARAFENaiveFunction(Function):
    """Autograd wrapper around the naive CARAFE CUDA kernels."""

    @staticmethod
    def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
        # ONNX export: emit the custom ``mmcv::MMCVCARAFENaive`` op.
        return g.op(
            'mmcv::MMCVCARAFENaive',
            features,
            masks,
            kernel_size_i=kernel_size,
            group_size_i=group_size,
            scale_factor_f=scale_factor)

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        """Upsample ``features`` by ``scale_factor`` using per-pixel ``masks``."""
        # Validate mask/feature geometry before touching the extension.
        assert scale_factor >= 1
        assert masks.size(1) == kernel_size * kernel_size * group_size
        assert masks.size(-1) == features.size(-1) * scale_factor
        assert masks.size(-2) == features.size(-2) * scale_factor
        assert features.size(1) % group_size == 0
        # kernel_size must be a positive odd number.
        assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1

        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()

        n, c, h, w = features.size()
        output = features.new_zeros((n, c, h * scale_factor, w * scale_factor))
        ext_module.carafe_naive_forward(
            features,
            masks,
            output,
            kernel_size=kernel_size,
            group_size=group_size,
            scale_factor=scale_factor)

        # Inputs are only needed again if a backward pass will run.
        if features.requires_grad or masks.requires_grad:
            ctx.save_for_backward(features, masks)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Compute gradients w.r.t. ``features`` and ``masks`` (CUDA only)."""
        assert grad_output.is_cuda
        features, masks = ctx.saved_tensors
        grad_input = torch.zeros_like(features)
        grad_masks = torch.zeros_like(masks)
        ext_module.carafe_naive_backward(
            grad_output.contiguous(),
            features,
            masks,
            grad_input,
            grad_masks,
            kernel_size=ctx.kernel_size,
            group_size=ctx.group_size,
            scale_factor=ctx.scale_factor)
        # The three int hyper-parameters receive no gradient.
        return grad_input, grad_masks, None, None, None
|
class CARAFENaive(Module):
    """Naive CARAFE upsampling module wrapping :func:`carafe_naive`."""

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFENaive, self).__init__()
        # All three hyper-parameters must be plain Python ints.
        assert isinstance(kernel_size, int)
        assert isinstance(group_size, int)
        assert isinstance(scale_factor, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        """Reassemble ``features`` with ``masks`` and upsample."""
        return carafe_naive(features, masks, self.kernel_size,
                            self.group_size, self.scale_factor)
|
class CARAFEFunction(Function):
    # Autograd wrapper around the optimized CARAFE CUDA kernels.
    @staticmethod
    def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
        # ONNX export: emit the custom ``mmcv::MMCVCARAFE`` op.
        return g.op('mmcv::MMCVCARAFE', features, masks, kernel_size_i=kernel_size, group_size_i=group_size, scale_factor_f=scale_factor)
    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        """Upsample ``features`` (n, c, h, w) to (n, c, h*s, w*s) by
        reassembling local neighborhoods with per-pixel ``masks``.

        ``masks`` must have ``kernel_size**2 * group_size`` channels and the
        spatial size of the upsampled output.
        """
        assert (scale_factor >= 1)
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel_size must be a positive odd number.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        # r* tensors are scratch buffers the kernel fills internally
        # (presumably rearranged features/masks/output — see the CUDA op);
        # they never require grad.
        routput = features.new_zeros(output.size(), requires_grad=False)
        rfeatures = features.new_zeros(features.size(), requires_grad=False)
        rmasks = masks.new_zeros(masks.size(), requires_grad=False)
        ext_module.carafe_forward(features, masks, rfeatures, routput, rmasks, output, kernel_size=kernel_size, group_size=group_size, scale_factor=scale_factor)
        # Save only if a backward pass can happen; rfeatures is reused there.
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks, rfeatures)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        """Compute gradients for ``features`` and ``masks`` (CUDA only)."""
        assert grad_output.is_cuda
        (features, masks, rfeatures) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        # rgrad_* tensors are kernel-internal scratch space.
        rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input = torch.zeros_like(features, requires_grad=False)
        rgrad_masks = torch.zeros_like(masks, requires_grad=False)
        grad_input = torch.zeros_like(features, requires_grad=False)
        grad_masks = torch.zeros_like(masks, requires_grad=False)
        ext_module.carafe_backward(grad_output.contiguous(), rfeatures, masks, rgrad_output, rgrad_input_hs, rgrad_input, rgrad_masks, grad_input, grad_masks, kernel_size=kernel_size, group_size=group_size, scale_factor=scale_factor)
        # The three int hyper-parameters receive no gradient.
        return (grad_input, grad_masks, None, None, None)
|
class CARAFE(Module):
    ' CARAFE: Content-Aware ReAssembly of FEatures\n\n    Please refer to `CARAFE: Content-Aware ReAssembly of FEatures\n    <https://arxiv.org/abs/1905.02188>`_ for more details.\n\n    Args:\n        kernel_size (int): reassemble kernel size\n        group_size (int): reassemble group size\n        scale_factor (int): upsample ratio\n\n    Returns:\n        upsampled feature map\n    '

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFE, self).__init__()
        # Hyper-parameters must all be plain Python ints.
        for value in (kernel_size, group_size, scale_factor):
            assert isinstance(value, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        # Delegate to the functional interface backed by the CUDA op.
        return carafe(features, masks, self.kernel_size, self.group_size,
                      self.scale_factor)
|
@UPSAMPLE_LAYERS.register_module(name='carafe')
class CARAFEPack(nn.Module):
    'A unified package of CARAFE upsampler that contains: 1) channel\n compressor 2) content encoder 3) CARAFE op.\n\n Official implementation of ICCV 2019 paper\n `CARAFE: Content-Aware ReAssembly of FEatures\n <https://arxiv.org/abs/1905.02188>`_.\n\n Args:\n channels (int): input feature channels\n scale_factor (int): upsample ratio\n up_kernel (int): kernel size of CARAFE op\n up_group (int): group size of CARAFE op\n encoder_kernel (int): kernel size of content encoder\n encoder_dilation (int): dilation of content encoder\n compressed_channels (int): output channels of channels compressor\n\n Returns:\n upsampled feature map\n '
    def __init__(self, channels, scale_factor, up_kernel=5, up_group=1, encoder_kernel=3, encoder_dilation=1, compressed_channels=64):
        super(CARAFEPack, self).__init__()
        self.channels = channels
        self.scale_factor = scale_factor
        self.up_kernel = up_kernel
        self.up_group = up_group
        self.encoder_kernel = encoder_kernel
        self.encoder_dilation = encoder_dilation
        self.compressed_channels = compressed_channels
        # 1x1 conv that shrinks the channel count before kernel prediction.
        self.channel_compressor = nn.Conv2d(channels, self.compressed_channels, 1)
        # Predicts up_kernel^2 * up_group reassembly kernels for each of the
        # scale_factor^2 sub-pixel positions; padding keeps spatial size.
        self.content_encoder = nn.Conv2d(self.compressed_channels, ((((self.up_kernel * self.up_kernel) * self.up_group) * self.scale_factor) * self.scale_factor), self.encoder_kernel, padding=int((((self.encoder_kernel - 1) * self.encoder_dilation) / 2)), dilation=self.encoder_dilation, groups=1)
        self.init_weights()
    def init_weights(self):
        """Xavier-init all convs, then re-init the encoder to small normals."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
        normal_init(self.content_encoder, std=0.001)
    def kernel_normalizer(self, mask):
        """Pixel-shuffle predicted kernels to the upsampled resolution and
        softmax-normalize each up_kernel**2 window."""
        mask = F.pixel_shuffle(mask, self.scale_factor)
        (n, mask_c, h, w) = mask.size()
        mask_channel = int((mask_c / float((self.up_kernel ** 2))))
        # Softmax over the kernel-window dimension (dim=2 after the view).
        mask = mask.view(n, mask_channel, (- 1), h, w)
        mask = F.softmax(mask, dim=2, dtype=mask.dtype)
        mask = mask.view(n, mask_c, h, w).contiguous()
        return mask
    def feature_reassemble(self, x, mask):
        # Apply the CARAFE op with the normalized kernels.
        x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)
        return x
    def forward(self, x):
        """Compress -> predict kernels -> normalize -> reassemble."""
        compressed_x = self.channel_compressor(x)
        mask = self.content_encoder(compressed_x)
        mask = self.kernel_normalizer(mask)
        x = self.feature_reassemble(x, mask)
        return x
|
def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area, kernel_num):
    """Assign foreground pixels to instances by expanding kernel contours.

    Args:
        kernel_mask (np.array or torch.Tensor): Instance kernel mask of
            shape (h, w).
        internal_kernel_label (np.array or torch.Tensor): Instance internal
            kernel label of shape (h, w).
        min_kernel_area (int): The minimum kernel area.
        kernel_num (int): The instance kernel number.

    Returns:
        list: The instance index map with size hxw.
    """
    assert isinstance(kernel_mask, (torch.Tensor, np.ndarray))
    assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(min_kernel_area, int)
    assert isinstance(kernel_num, int)

    # The extension expects torch tensors; promote numpy inputs.
    if isinstance(kernel_mask, np.ndarray):
        kernel_mask = torch.from_numpy(kernel_mask)
    if isinstance(internal_kernel_label, np.ndarray):
        internal_kernel_label = torch.from_numpy(internal_kernel_label)

    if torch.__version__ != 'parrots':
        # Plain PyTorch build: positional call, result returned as-is.
        return ext_module.contour_expand(kernel_mask, internal_kernel_label,
                                         min_kernel_area, kernel_num)

    # parrots build: keyword call, explicit empty-input guard, list result.
    if kernel_mask.shape[0] == 0 or internal_kernel_label.shape[0] == 0:
        return []
    label = ext_module.contour_expand(
        kernel_mask,
        internal_kernel_label,
        min_kernel_area=min_kernel_area,
        kernel_num=kernel_num)
    return label.tolist()
|
class TopPoolFunction(Function):
    """Autograd wrapper for the CUDA top corner-pooling op."""

    @staticmethod
    def symbolic(g, input):
        # ONNX export: shared ``mmcv::MMCVCornerPool`` op, 'top' mode id.
        return g.op('mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top']))

    @staticmethod
    def forward(ctx, input):
        pooled = ext_module.top_pool_forward(input)
        # Keep the raw input; backward recomputes argmax positions from it.
        ctx.save_for_backward(input)
        return pooled

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        return ext_module.top_pool_backward(input, grad_output)
|
class BottomPoolFunction(Function):
    """Autograd wrapper for the CUDA bottom corner-pooling op."""

    @staticmethod
    def symbolic(g, input):
        # ONNX export: shared ``mmcv::MMCVCornerPool`` op, 'bottom' mode id.
        return g.op('mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom']))

    @staticmethod
    def forward(ctx, input):
        pooled = ext_module.bottom_pool_forward(input)
        # Keep the raw input; backward recomputes argmax positions from it.
        ctx.save_for_backward(input)
        return pooled

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        return ext_module.bottom_pool_backward(input, grad_output)
|
class LeftPoolFunction(Function):
    """Autograd wrapper for the CUDA left corner-pooling op."""

    @staticmethod
    def symbolic(g, input):
        # ONNX export: shared ``mmcv::MMCVCornerPool`` op, 'left' mode id.
        return g.op('mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left']))

    @staticmethod
    def forward(ctx, input):
        pooled = ext_module.left_pool_forward(input)
        # Keep the raw input; backward recomputes argmax positions from it.
        ctx.save_for_backward(input)
        return pooled

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        return ext_module.left_pool_backward(input, grad_output)
|
class RightPoolFunction(Function):
    """Autograd wrapper for the CUDA right corner-pooling op."""

    @staticmethod
    def symbolic(g, input):
        # ONNX export: shared ``mmcv::MMCVCornerPool`` op, 'right' mode id.
        return g.op('mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right']))

    @staticmethod
    def forward(ctx, input):
        pooled = ext_module.right_pool_forward(input)
        # Keep the raw input; backward recomputes argmax positions from it.
        ctx.save_for_backward(input)
        return pooled

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        return ext_module.right_pool_backward(input, grad_output)
|
class CornerPool(nn.Module):
    "Corner Pooling.\n\n Corner Pooling is a new type of pooling layer that helps a\n convolutional network better localize corners of bounding boxes.\n\n Please refer to `CornerNet: Detecting Objects as Paired Keypoints\n <https://arxiv.org/abs/1808.01244>`_ for more details.\n\n Code is modified from https://github.com/princeton-vl/CornerNet-Lite.\n\n Args:\n mode (str): Pooling orientation for the pooling layer\n\n - 'bottom': Bottom Pooling\n - 'left': Left Pooling\n - 'right': Right Pooling\n - 'top': Top Pooling\n\n Returns:\n Feature map after pooling.\n "
    pool_functions = {'bottom': BottomPoolFunction, 'left': LeftPoolFunction, 'right': RightPoolFunction, 'top': TopPoolFunction}
    # (dim to cummax over, whether the tensor must be flipped first).
    cummax_dim_flip = {'bottom': (2, False), 'left': (3, True), 'right': (3, False), 'top': (2, True)}

    @staticmethod
    def _parse_version(version_str):
        """Return ``(major, minor)`` ints parsed from a version string.

        BUGFIX: the previous lexicographic comparison
        ``torch.__version__ >= '1.5.0'`` is wrong for multi-digit components
        (e.g. ``'1.10.0' < '1.5.0'`` as strings), which wrongly routed new
        torch releases to the custom CUDA op. Compare numeric tuples instead.
        Handles local suffixes like ``'1.10.0+cu113'``.
        """
        release = version_str.split('+')[0]
        numbers = []
        for piece in release.split('.')[:2]:
            digits = ''.join(ch for ch in piece if ch.isdigit())
            numbers.append(int(digits) if digits else 0)
        return tuple(numbers)

    def __init__(self, mode):
        super(CornerPool, self).__init__()
        assert (mode in self.pool_functions)
        self.mode = mode
        self.corner_pool = self.pool_functions[mode]

    def forward(self, x):
        """Corner-pool ``x`` along the orientation chosen at construction."""
        # torch >= 1.5 provides cummax, which implements corner pooling
        # natively; older torch and parrots fall back to the custom op.
        if ((torch.__version__ != 'parrots') and (self._parse_version(torch.__version__) >= (1, 5))):
            if torch.onnx.is_in_onnx_export():
                assert (self._parse_version(torch.__version__) >= (1, 7)), "When `cummax` serves as an intermediate component whose outputs is used as inputs for another modules, it's expected that pytorch version must be >= 1.7.0, otherwise Error appears like: `RuntimeError: tuple appears in op that does not forward tuples, unsupported kind: prim::PythonOp`."
            (dim, flip) = self.cummax_dim_flip[self.mode]
            # cummax scans toward increasing indices; flip first for the
            # orientations that pool in the opposite direction.
            if flip:
                x = x.flip(dim)
            (pool_tensor, _) = torch.cummax(x, dim=dim)
            if flip:
                pool_tensor = pool_tensor.flip(dim)
            return pool_tensor
        else:
            return self.corner_pool.apply(x)
|
class CorrelationFunction(Function):
    # Autograd wrapper for the correlation-volume CUDA kernels.
    @staticmethod
    def forward(ctx, input1, input2, kernel_size=1, max_displacement=1, stride=1, padding=1, dilation=1, dilation_patch=1):
        """Compute the correlation volume between ``input1`` and ``input2``.

        Returns a tensor of shape (B, patch_size, patch_size, oH, oW) with
        ``patch_size = 2 * max_displacement + 1``.
        """
        ctx.save_for_backward(input1, input2)
        # Normalize every scalar hyper-parameter to an (H, W) pair and stash
        # it on ctx for the backward pass.
        (kH, kW) = ctx.kernel_size = _pair(kernel_size)
        patch_size = ((max_displacement * 2) + 1)
        ctx.patch_size = patch_size
        (dH, dW) = ctx.stride = _pair(stride)
        (padH, padW) = ctx.padding = _pair(padding)
        (dilationH, dilationW) = ctx.dilation = _pair(dilation)
        (dilation_patchH, dilation_patchW) = ctx.dilation_patch = _pair(dilation_patch)
        output_size = CorrelationFunction._output_size(ctx, input1)
        output = input1.new_zeros(output_size)
        ext_module.correlation_forward(input1, input2, output, kH=kH, kW=kW, patchH=patch_size, patchW=patch_size, padH=padH, padW=padW, dilationH=dilationH, dilationW=dilationW, dilation_patchH=dilation_patchH, dilation_patchW=dilation_patchW, dH=dH, dW=dW)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Gradients w.r.t. both inputs; the six hyper-parameters get None."""
        (input1, input2) = ctx.saved_tensors
        (kH, kW) = ctx.kernel_size
        patch_size = ctx.patch_size
        (padH, padW) = ctx.padding
        (dilationH, dilationW) = ctx.dilation
        (dilation_patchH, dilation_patchW) = ctx.dilation_patch
        (dH, dW) = ctx.stride
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)
        ext_module.correlation_backward(grad_output, input1, input2, grad_input1, grad_input2, kH=kH, kW=kW, patchH=patch_size, patchW=patch_size, padH=padH, padW=padW, dilationH=dilationH, dilationW=dilationW, dilation_patchH=dilation_patchH, dilation_patchW=dilation_patchW, dH=dH, dW=dW)
        return (grad_input1, grad_input2, None, None, None, None, None, None)
    @staticmethod
    def _output_size(ctx, input1):
        # Standard conv-style output size with a dilated kernel.
        (iH, iW) = (input1.size(2), input1.size(3))
        batch_size = input1.size(0)
        (kH, kW) = ctx.kernel_size
        patch_size = ctx.patch_size
        (dH, dW) = ctx.stride
        (padH, padW) = ctx.padding
        (dilationH, dilationW) = ctx.dilation
        dilatedKH = (((kH - 1) * dilationH) + 1)
        dilatedKW = (((kW - 1) * dilationW) + 1)
        oH = int(((((iH + (2 * padH)) - dilatedKH) / dH) + 1))
        oW = int(((((iW + (2 * padW)) - dilatedKW) / dW) + 1))
        output_size = (batch_size, patch_size, patch_size, oH, oW)
        return output_size
|
class Correlation(nn.Module):
    "Correlation operator\n\n This correlation operator works for optical flow correlation computation.\n\n There are two batched tensors with shape :math:`(N, C, H, W)`,\n and the correlation output's shape is :math:`(N, max\\_displacement \\times\n 2 + 1, max\\_displacement * 2 + 1, H_{out}, W_{out})`\n\n where\n\n .. math::\n H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times padding -\n dilation \\times (kernel\\_size - 1) - 1}\n {stride} + 1\\right\\rfloor\n\n .. math::\n W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times padding - dilation\n \\times (kernel\\_size - 1) - 1}\n {stride} + 1\\right\\rfloor\n\n the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding\n window convolution between input1 and shifted input2,\n\n .. math::\n Corr(N_i, dx, dy) =\n \\sum_{c=0}^{C-1}\n input1(N_i, c) \\star\n \\mathcal{S}(input2(N_i, c), dy, dx)\n\n where :math:`\\star` is the valid 2d sliding window convolution operator,\n and :math:`\\mathcal{S}` means shifting the input features (auto-complete\n zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy \\in\n [-max\\_displacement \\times dilation\\_patch, max\\_displacement \\times\n dilation\\_patch]`.\n\n Args:\n kernel_size (int): The size of sliding window i.e. local neighborhood\n representing the center points and involved in correlation\n computation. Defaults to 1.\n max_displacement (int): The radius for computing correlation volume,\n but the actual working space can be dilated by dilation_patch.\n Defaults to 1.\n stride (int): The stride of the sliding blocks in the input spatial\n dimensions. Defaults to 1.\n padding (int): Zero padding added to all four sides of the input1.\n Defaults to 0.\n dilation (int): The spacing of local neighborhood that will involved\n in correlation. Defaults to 1.\n dilation_patch (int): The spacing between position need to compute\n correlation. Defaults to 1.\n "

    def __init__(self, kernel_size: int=1, max_displacement: int=1, stride: int=1, padding: int=0, dilation: int=1, dilation_patch: int=1) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.dilation_patch = dilation_patch

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        # Delegate to the autograd Function backed by the CUDA kernels.
        return CorrelationFunction.apply(input1, input2, self.kernel_size,
                                         self.max_displacement, self.stride,
                                         self.padding, self.dilation,
                                         self.dilation_patch)

    def __repr__(self) -> str:
        # Single f-string rather than incremental concatenation.
        return (f'{self.__class__.__name__}'
                f'(kernel_size={self.kernel_size}, '
                f'max_displacement={self.max_displacement}, '
                f'stride={self.stride}, '
                f'padding={self.padding}, '
                f'dilation={self.dilation}, '
                f'dilation_patch={self.dilation_patch})')
|
class DeformRoIPoolFunction(Function):
    # Autograd wrapper for the deformable RoI pooling CUDA kernels.
    @staticmethod
    def symbolic(g, input, rois, offset, output_size, spatial_scale, sampling_ratio, gamma):
        # ONNX export of the custom op.
        return g.op('mmcv::MMCVDeformRoIPool', input, rois, offset, pooled_height_i=output_size[0], pooled_width_i=output_size[1], spatial_scale_f=spatial_scale, sampling_ratio_f=sampling_ratio, gamma_f=gamma)
    @staticmethod
    def forward(ctx, input, rois, offset, output_size, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        """Deformable RoI pooling.

        Args:
            input: feature map; channel count read from ``input.size(1)``.
            rois: (num_rois, 5) boxes as (batch_idx, x1, y1, x2, y2).
            offset: per-bin offsets, or None for plain (non-deformable)
                pooling.
            output_size: pooled (height, width).
            spatial_scale: scale from RoI coords to feature coords —
                presumably 1/feature-stride; confirm with callers.
            sampling_ratio: forwarded to the kernel as-is.
            gamma: forwarded to the kernel as-is (offset scaling per op docs).
        """
        if (offset is None):
            # An empty tensor signals "no offsets" to the extension.
            offset = input.new_zeros(0)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = float(spatial_scale)
        ctx.sampling_ratio = int(sampling_ratio)
        ctx.gamma = float(gamma)
        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1])
        output = input.new_zeros(output_shape)
        ext_module.deform_roi_pool_forward(input, rois, offset, output, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, gamma=ctx.gamma)
        ctx.save_for_backward(input, rois, offset)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Gradients w.r.t. ``input`` and (when provided) ``offset``."""
        (input, rois, offset) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(input.shape)
        grad_offset = grad_output.new_zeros(offset.shape)
        ext_module.deform_roi_pool_backward(grad_output, input, rois, offset, grad_input, grad_offset, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, gamma=ctx.gamma)
        # offset was the empty placeholder -> no offset gradient to return.
        if (grad_offset.numel() == 0):
            grad_offset = None
        return (grad_input, None, grad_offset, None, None, None, None)
|
class DeformRoIPool(nn.Module):
    """Base module for deformable RoI pooling."""

    def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(DeformRoIPool, self).__init__()
        # Normalize every scalar argument to its canonical type up front.
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.gamma = float(gamma)

    def forward(self, input, rois, offset=None):
        """Pool ``rois`` from ``input``; ``offset`` may be None (no deform)."""
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio,
                               self.gamma)
|
class DeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling that predicts its own offsets from a first,
    offset-free pooling pass."""

    def __init__(self, output_size, output_channels, deform_fc_channels=1024, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
        self.output_channels = output_channels
        self.deform_fc_channels = deform_fc_channels
        flat_dim = self.output_size[0] * self.output_size[1] * self.output_channels
        offset_dim = self.output_size[0] * self.output_size[1] * 2
        # Small MLP mapping pooled features to per-bin (2-channel) offsets.
        self.offset_fc = nn.Sequential(
            nn.Linear(flat_dim, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, offset_dim))
        # Zero-init the final layer so training starts from no deformation.
        self.offset_fc[-1].weight.data.zero_()
        self.offset_fc[-1].bias.data.zero_()

    def forward(self, input, rois):
        """Pool with offsets predicted from a plain first pooling pass."""
        assert input.size(1) == self.output_channels
        plain = deform_roi_pool(input, rois, None, self.output_size,
                                self.spatial_scale, self.sampling_ratio,
                                self.gamma)
        num_rois = rois.size(0)
        offset = self.offset_fc(plain.view(num_rois, -1))
        offset = offset.view(num_rois, 2, self.output_size[0],
                             self.output_size[1])
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio,
                               self.gamma)
|
class ModulatedDeformRoIPoolPack(DeformRoIPool):
    # Deformable RoI pooling whose offsets AND a sigmoid modulation mask are
    # both predicted from an initial offset-free pooling pass.
    def __init__(self, output_size, output_channels, deform_fc_channels=1024, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(ModulatedDeformRoIPoolPack, self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
        self.output_channels = output_channels
        self.deform_fc_channels = deform_fc_channels
        # MLP predicting per-bin (dy, dx) offsets from pooled features.
        self.offset_fc = nn.Sequential(nn.Linear(((self.output_size[0] * self.output_size[1]) * self.output_channels), self.deform_fc_channels), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_channels, self.deform_fc_channels), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_channels, ((self.output_size[0] * self.output_size[1]) * 2)))
        # Zero-init the last layer so training starts from no deformation.
        self.offset_fc[(- 1)].weight.data.zero_()
        self.offset_fc[(- 1)].bias.data.zero_()
        # MLP predicting a per-bin modulation scalar in (0, 1).
        self.mask_fc = nn.Sequential(nn.Linear(((self.output_size[0] * self.output_size[1]) * self.output_channels), self.deform_fc_channels), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_channels, ((self.output_size[0] * self.output_size[1]) * 1)), nn.Sigmoid())
        # Index 2 is the last Linear (the Sigmoid at index 3 has no params);
        # zero-init => initial mask is sigmoid(0) = 0.5 everywhere.
        self.mask_fc[2].weight.data.zero_()
        self.mask_fc[2].bias.data.zero_()
    def forward(self, input, rois):
        """Pool with predicted offsets, then scale by the predicted mask."""
        assert (input.size(1) == self.output_channels)
        # First pass without offsets feeds both prediction heads.
        x = deform_roi_pool(input, rois, None, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, (- 1)))
        offset = offset.view(rois_num, 2, self.output_size[0], self.output_size[1])
        mask = self.mask_fc(x.view(rois_num, (- 1)))
        mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1])
        d = deform_roi_pool(input, rois, offset, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
        # Element-wise modulation of the pooled features.
        return (d * mask)
|
class Conv2d_deprecated(Conv2d):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Warn on every instantiation so callers migrate to mmcv.cnn.
        warnings.warn(
            'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning)
|
class ConvTranspose2d_deprecated(ConvTranspose2d):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Warn on every instantiation so callers migrate to mmcv.cnn.
        warnings.warn(
            'Importing ConvTranspose2d wrapper from "mmcv.ops" will be'
            ' deprecated in the future. Please import them from "mmcv.cnn"'
            ' instead',
            DeprecationWarning)
|
class MaxPool2d_deprecated(MaxPool2d):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Warn on every instantiation so callers migrate to mmcv.cnn.
        warnings.warn(
            'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated'
            ' in the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning)
|
class Linear_deprecated(Linear):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Warn on every instantiation so callers migrate to mmcv.cnn.
        warnings.warn(
            'Importing Linear wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning)
|
class SigmoidFocalLossFunction(Function):
    # Autograd wrapper for the sigmoid focal loss CUDA kernels.
    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export of the custom op.
        return g.op('mmcv::MMCVSigmoidFocalLoss', input, target, gamma_f=gamma, alpha_f=alpha, weight_f=weight, reduction_s=reduction)
    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25, weight=None, reduction='mean'):
        """Compute the sigmoid focal loss.

        Args:
            input: (N, C) raw class scores.
            target: (N,) int64 class indices.
            gamma, alpha: focal-loss focusing / balancing parameters.
            weight: optional (C,) per-class weights; an empty tensor is
                passed to the kernel when omitted.
            reduction: one of 'none', 'mean', 'sum'.
        """
        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert (input.dim() == 2)
        assert (target.dim() == 1)
        assert (input.size(0) == target.size(0))
        if (weight is None):
            # Empty placeholder means "no class weights" for the kernel.
            weight = input.new_empty(0)
        else:
            assert (weight.dim() == 1)
            assert (input.size(1) == weight.size(0))
        # The reduction mode is encoded as an integer for the extension.
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert (reduction in ctx.reduction_dict.keys())
        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]
        # Element-wise losses are filled by the kernel; reduction in Python.
        output = input.new_zeros(input.size())
        ext_module.sigmoid_focal_loss_forward(input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
        if (ctx.reduction == ctx.reduction_dict['mean']):
            output = (output.sum() / input.size(0))
        elif (ctx.reduction == ctx.reduction_dict['sum']):
            output = output.sum()
        ctx.save_for_backward(input, target, weight)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Gradient w.r.t. ``input`` only; the other five inputs get None."""
        (input, target, weight) = ctx.saved_tensors
        grad_input = input.new_zeros(input.size())
        ext_module.sigmoid_focal_loss_backward(input, target, weight, grad_input, gamma=ctx.gamma, alpha=ctx.alpha)
        # Chain rule with the incoming gradient.
        grad_input *= grad_output
        # Mirror the forward 'mean' reduction in the gradient.
        if (ctx.reduction == ctx.reduction_dict['mean']):
            grad_input /= input.size(0)
        return (grad_input, None, None, None, None, None)
|
class SigmoidFocalLoss(nn.Module):
    """Module wrapper around :func:`sigmoid_focal_loss`."""

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Buffer registration keeps per-class weights on the module device.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        """Compute the sigmoid focal loss of ``input`` w.r.t. ``target``."""
        return sigmoid_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
|
class SoftmaxFocalLossFunction(Function):
    # Autograd wrapper for the softmax focal loss CUDA kernels.
    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export of the custom op.
        return g.op('mmcv::MMCVSoftmaxFocalLoss', input, target, gamma_f=gamma, alpha_f=alpha, weight_f=weight, reduction_s=reduction)
    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25, weight=None, reduction='mean'):
        """Compute the softmax focal loss.

        Args:
            input: (N, C) raw class scores; softmax is applied here before
                the kernel is called.
            target: (N,) int64 class indices.
            gamma, alpha: focal-loss focusing / balancing parameters.
            weight: optional (C,) per-class weights; an empty tensor is
                passed to the kernel when omitted.
            reduction: one of 'none', 'mean', 'sum'.
        """
        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert (input.dim() == 2)
        assert (target.dim() == 1)
        assert (input.size(0) == target.size(0))
        if (weight is None):
            # Empty placeholder means "no class weights" for the kernel.
            weight = input.new_empty(0)
        else:
            assert (weight.dim() == 1)
            assert (input.size(1) == weight.size(0))
        # The reduction mode is encoded as an integer for the extension.
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert (reduction in ctx.reduction_dict.keys())
        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]
        # Numerically stable softmax: subtract the per-row max before
        # exponentiating, then normalize by the row sum.
        (channel_stats, _) = torch.max(input, dim=1)
        input_softmax = (input - channel_stats.unsqueeze(1).expand_as(input))
        input_softmax.exp_()
        channel_stats = input_softmax.sum(dim=1)
        input_softmax /= channel_stats.unsqueeze(1).expand_as(input)
        # Per-sample losses are filled by the kernel; reduction in Python.
        output = input.new_zeros(input.size(0))
        ext_module.softmax_focal_loss_forward(input_softmax, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
        if (ctx.reduction == ctx.reduction_dict['mean']):
            output = (output.sum() / input.size(0))
        elif (ctx.reduction == ctx.reduction_dict['sum']):
            output = output.sum()
        # Backward needs the softmax probabilities, not the raw input.
        ctx.save_for_backward(input_softmax, target, weight)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        """Gradient w.r.t. ``input`` only; the other five inputs get None."""
        (input_softmax, target, weight) = ctx.saved_tensors
        # Scratch buffer used by the backward kernel.
        buff = input_softmax.new_zeros(input_softmax.size(0))
        grad_input = input_softmax.new_zeros(input_softmax.size())
        ext_module.softmax_focal_loss_backward(input_softmax, target, weight, buff, grad_input, gamma=ctx.gamma, alpha=ctx.alpha)
        # Chain rule with the incoming gradient.
        grad_input *= grad_output
        # Mirror the forward 'mean' reduction in the gradient.
        if (ctx.reduction == ctx.reduction_dict['mean']):
            grad_input /= input_softmax.size(0)
        return (grad_input, None, None, None, None, None)
|
class SoftmaxFocalLoss(nn.Module):
    """Module wrapper around :func:`softmax_focal_loss`."""

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SoftmaxFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Buffer registration keeps per-class weights on the module device.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        """Compute the softmax focal loss of ``input`` w.r.t. ``target``."""
        return softmax_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
|
class FurthestPointSampling(Function):
    """Iterative furthest point sampling: repeatedly pick the point with the
    largest distance to the already-selected set."""

    @staticmethod
    def forward(ctx, points_xyz: torch.Tensor, num_points: int) -> torch.Tensor:
        """Sample ``num_points`` indices from ``points_xyz``.

        Args:
            points_xyz (torch.Tensor): (B, N, 3) with N > num_points.
            num_points (int): Size of the sampled set.

        Returns:
            torch.Tensor: (B, num_points) indices of the sampled points.
        """
        assert points_xyz.is_contiguous()
        B, N = points_xyz.size()[:2]
        indices = torch.cuda.IntTensor(B, num_points)
        # Running distance-to-selected-set buffer, initialized "infinite".
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
        ext_module.furthest_point_sampling_forward(
            points_xyz, temp, indices, b=B, n=N, m=num_points)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(indices)
        return indices

    @staticmethod
    def backward(xyz, a=None):
        # Index selection carries no gradient.
        return None, None
|
class FurthestPointSamplingWithDist(Function):
    """Furthest point sampling driven by a precomputed pairwise distance
    matrix instead of raw coordinates."""

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor, num_points: int) -> torch.Tensor:
        """Sample ``num_points`` indices using pairwise distances.

        Args:
            points_dist (torch.Tensor): (B, N, N) distance between each
                point pair.
            num_points (int): Size of the sampled set.

        Returns:
            torch.Tensor: (B, num_points) indices of the sampled points.
        """
        assert points_dist.is_contiguous()
        B, N, _ = points_dist.size()
        indices = points_dist.new_zeros([B, num_points], dtype=torch.int32)
        # Running distance-to-selected-set buffer, initialized "infinite".
        temp = points_dist.new_zeros([B, N]).fill_(1e10)
        ext_module.furthest_point_sampling_with_dist_forward(
            points_dist, temp, indices, b=B, n=N, m=num_points)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(indices)
        return indices

    @staticmethod
    def backward(xyz, a=None):
        # Index selection carries no gradient.
        return None, None
|
class FusedBiasLeakyReLUFunctionBackward(Function):
    'Calculate second order deviation.\n\n This function is to compute the second order deviation for the fused leaky\n relu operation.\n '
    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        # ``out`` (the forward activation) is enough to recompute gradients.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        # Empty tensor fills the kernel's unused bias slot in grad mode.
        # NOTE(review): act=3 / grad=1 are flag values interpreted by the
        # CUDA extension — presumably leaky-relu in gradient mode; confirm
        # against the fused_bias_leakyrelu op source.
        empty = grad_output.new_empty(0)
        grad_input = ext_module.fused_bias_leakyrelu(grad_output, empty, out, act=3, grad=1, alpha=negative_slope, scale=scale)
        # Bias gradient: sum over every dimension except channel dim 1.
        dim = [0]
        if (grad_input.ndim > 2):
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()
        return (grad_input, grad_bias)
    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        (out,) = ctx.saved_tensors
        # Second-order gradient reuses the same fused kernel; the bias
        # gradient is cast to match the activation dtype.
        gradgrad_out = ext_module.fused_bias_leakyrelu(gradgrad_input, gradgrad_bias.to(out.dtype), out, act=3, grad=1, alpha=ctx.negative_slope, scale=ctx.scale)
        return (gradgrad_out, None, None, None)
|
class FusedBiasLeakyReLUFunction(Function):
    """Autograd wrapper for the fused bias + scaled leaky-ReLU CUDA kernel."""

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        # Empty tensor fills the kernel's refer-tensor slot in forward mode.
        # NOTE(review): act=3 / grad=0 are flag values interpreted by the
        # extension — confirm against the op source.
        placeholder = input.new_empty(0)
        out = ext_module.fused_bias_leakyrelu(
            input, bias, placeholder, act=3, grad=0,
            alpha=negative_slope, scale=scale)
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale)
        # The two scalar hyper-parameters receive no gradient.
        return grad_input, grad_bias, None, None
|
class FusedBiasLeakyReLU(nn.Module):
    """Fused bias + leaky ReLU module (StyleGAN2,
    `Analyzing and Improving the Image Quality of StyleGAN
    <http://arxiv.org/abs/1912.04958>`_).

    The bias comes from the preceding convolution; the output is scaled
    (default :math:`\\sqrt{2}`) to keep feature/gradient variance unchanged.
    CUDA only — see TODO in the original implementation for a CPU version.

    Args:
        num_channels (int): Channel count of the feature map.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): Scalar adjusting the feature-map variance.
            Defaults to 2**0.5.
    """

    def __init__(self, num_channels, negative_slope=0.2, scale=(2 ** 0.5)):
        super(FusedBiasLeakyReLU, self).__init__()
        # One learnable bias per channel, starting at zero.
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        """Apply bias, leaky ReLU and the variance-preserving scale."""
        return fused_bias_leakyrelu(input, self.bias, self.negative_slope,
                                    self.scale)
|
def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Fused bias leaky ReLU function.

    This function is introduced in the StyleGAN2:
    `Analyzing and Improving the Image Quality of StyleGAN
    <http://arxiv.org/abs/1912.04958>`_

    The bias term comes from the preceding convolution. A scale (sqrt(2) by
    default, following Kaiming initialization) keeps the variance of the
    feature map or gradients unchanged.

    Args:
        input (torch.Tensor): Input feature map.
        bias (nn.Parameter): The bias from convolution operation.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the
            feature map. Defaults to 2**0.5.

    Returns:
        torch.Tensor: Feature map after non-linear activation.
    """
    if input.is_cuda:
        # Fused CUDA kernel path.
        return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype),
                                                negative_slope, scale)
    # CPU fallback: plain PyTorch reference implementation.
    return bias_leakyrelu_ref(input, bias, negative_slope, scale)
|
def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Reference (pure PyTorch) bias + leaky ReLU with variance scaling.

    Args:
        x (torch.Tensor): Input feature map; channels on dim 1.
        bias (torch.Tensor or None): Per-channel bias (1-D, length C).
        negative_slope (float): Leaky ReLU slope. Defaults to 0.2.
        scale (float): Post-activation scale. Defaults to 2**0.5.

    Returns:
        torch.Tensor: Activated (and scaled) feature map.
    """
    if bias is not None:
        assert bias.ndim == 1
        assert bias.shape[0] == x.shape[1]
        # Broadcast the per-channel bias across every other dimension.
        broadcast_shape = [-1 if dim == 1 else 1 for dim in range(x.ndim)]
        x = x + bias.reshape(broadcast_shape)
    x = F.leaky_relu(x, negative_slope)
    if scale != 1:
        x = x * scale
    return x
|
class GatherPoints(Function):
    """Gather points with given index."""

    @staticmethod
    def forward(ctx, features: torch.Tensor,
                indices: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): (B, C, N) features to gather.
            indices (torch.Tensor): (B, M) where M is the number of points.

        Returns:
            torch.Tensor: (B, C, M) where M is the number of points.
        """
        assert features.is_contiguous()
        assert indices.is_contiguous()
        B, npoint = indices.size()
        _, C, N = features.size()
        gathered = features.new_zeros((B, C, npoint))
        ext_module.gather_points_forward(
            features, indices, gathered, b=B, c=C, n=N, npoints=npoint)
        # Stash what backward needs; indices carry no gradient.
        ctx.for_backwards = (indices, C, N)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(indices)
        return gathered

    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()
        # Scatter the incoming gradient back to the original point slots.
        grad_features = grad_out.new_zeros((B, C, N))
        ext_module.gather_points_backward(
            grad_out.data.contiguous(), idx, grad_features.data,
            b=B, c=C, n=N, npoints=npoint)
        return (grad_features, None)
|
def get_onnxruntime_op_path():
    """Locate the compiled onnxruntime custom-op shared library.

    Returns:
        str: Path of the first ``_ext_ort.*.so`` found next to this package,
        or an empty string when none exists.
    """
    package_dir = os.path.abspath(
        os.path.dirname(os.path.dirname(__file__)))
    candidates = glob.glob(os.path.join(package_dir, '_ext_ort.*.so'))
    return candidates[0] if candidates else ''
|
def boxes_iou_bev(boxes_a, boxes_b):
    """Calculate boxes IoU in the Bird's Eye View.

    Args:
        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
        boxes_b (torch.Tensor): Input boxes b with shape (N, 5).

    Returns:
        torch.Tensor: IoU result with shape (M, N).
    """
    num_a, num_b = boxes_a.shape[0], boxes_b.shape[0]
    # Pairwise IoU matrix, filled in place by the extension op.
    iou = boxes_a.new_zeros(torch.Size((num_a, num_b)))
    ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(),
                                           boxes_b.contiguous(), iou)
    return iou
|
def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
    """NMS function GPU implementation (for BEV boxes).

    The overlap of two boxes for IoU calculation is defined as the exact
    overlapping area of the two boxes. In this function, one can also set
    ``pre_max_size`` and ``post_max_size``.

    Args:
        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
            ([x1, y1, x2, y2, ry]).
        scores (torch.Tensor): Scores of boxes with the shape of [N].
        thresh (float): Overlap threshold of NMS.
        pre_max_size (int, optional): Max size of boxes before NMS.
            Default: None.
        post_max_size (int, optional): Max size of boxes after NMS.
            Default: None.

    Returns:
        torch.Tensor: Indexes after NMS.
    """
    assert (boxes.size(1) == 5), 'Input boxes shape should be [N, 5]'
    # Process boxes in descending score order so that suppression keeps the
    # higher-scoring box of any overlapping pair.
    order = scores.sort(0, descending=True)[1]
    if (pre_max_size is not None):
        order = order[:pre_max_size]
    boxes = boxes[order].contiguous()
    # ``keep`` receives indices into the sorted boxes; ``num_out`` the count.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = torch.zeros(size=(), dtype=torch.long)
    ext_module.iou3d_nms_forward(boxes, keep, num_out, nms_overlap_thresh=thresh)
    # Map sorted-order indices back to the original box indexing.
    keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
    if (post_max_size is not None):
        keep = keep[:post_max_size]
    return keep
|
def nms_normal_bev(boxes, scores, thresh):
    """Normal NMS function GPU implementation (for BEV boxes).

    The overlap of two boxes for IoU calculation is defined as the exact
    overlapping area of the two boxes WITH their yaw angle set to 0.

    Args:
        boxes (torch.Tensor): Input boxes with shape (N, 5).
        scores (torch.Tensor): Scores of predicted boxes with shape (N).
        thresh (float): Overlap threshold of NMS.

    Returns:
        torch.Tensor: Remaining indices with scores in descending order.
    """
    assert (boxes.shape[1] == 5), 'Input boxes shape should be [N, 5]'
    # Sort by score so suppression keeps the higher-scoring box.
    order = scores.sort(0, descending=True)[1]
    boxes = boxes[order].contiguous()
    # ``keep`` receives indices into the sorted boxes; ``num_out`` the count.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = torch.zeros(size=(), dtype=torch.long)
    ext_module.iou3d_nms_normal_forward(boxes, keep, num_out, nms_overlap_thresh=thresh)
    # Map sorted-order indices back to the original box indexing.
    return order[keep[:num_out].cuda(boxes.device)].contiguous()
|
class KNN(Function):
    """KNN (CUDA) based on heap data structure.

    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/pointops/src/knnquery_heap>`_.

    Find k-nearest points.
    """

    @staticmethod
    def forward(ctx, k: int, xyz: torch.Tensor, center_xyz: torch.Tensor=None, transposed: bool=False) -> torch.Tensor:
        """
        Args:
            k (int): number of nearest neighbors.
            xyz (torch.Tensor): (B, N, 3) if transposed == False, else
                (B, 3, N). xyz coordinates of the features.
            center_xyz (torch.Tensor, optional): (B, npoint, 3) if transposed
                is False, else (B, 3, npoint). centers of the knn query.
                Default: None.
            transposed (bool, optional): whether the input tensors are
                transposed. Should not explicitly use this keyword when
                calling knn (=KNN.apply), just add the fourth param.
                Default: False.

        Returns:
            torch.Tensor: (B, k, npoint) tensor with the indices of the
            features that form k-nearest neighbours.
        """
        assert ((k > 0) & (k < 100)), 'k should be in range(0, 100)'
        # Query against the full point set when no centers are given.
        if (center_xyz is None):
            center_xyz = xyz
        if transposed:
            # The CUDA kernel expects channel-last (B, N, 3) layout.
            xyz = xyz.transpose(2, 1).contiguous()
            center_xyz = center_xyz.transpose(2, 1).contiguous()
        assert xyz.is_contiguous()
        assert center_xyz.is_contiguous()
        center_xyz_device = center_xyz.get_device()
        assert (center_xyz_device == xyz.get_device()), 'center_xyz and xyz should be put on the same device'
        # Ensure the kernel launches on the tensors' device. NOTE(review):
        # this changes the process-wide current device as a side effect.
        if (torch.cuda.current_device() != center_xyz_device):
            torch.cuda.set_device(center_xyz_device)
        (B, npoint, _) = center_xyz.shape
        N = xyz.shape[1]
        idx = center_xyz.new_zeros((B, npoint, k)).int()
        dist2 = center_xyz.new_zeros((B, npoint, k)).float()
        ext_module.knn_forward(xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k)
        # (B, npoint, k) -> (B, k, npoint) to match the documented output.
        idx = idx.transpose(2, 1).contiguous()
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Indices are not differentiable; no gradients to propagate.
        return (None, None, None)
|
class BaseMergeCell(nn.Module):
    """The basic class for cells used in NAS-FPN and NAS-FCOS.

    BaseMergeCell takes 2 inputs. After applying convolution
    on them, they are resized to the target size. Then,
    they go through binary_op, which depends on the type of cell.
    If with_out_conv is True, the result of output will go through
    another convolution layer.

    Args:
        fused_channels (int): number of input channels in out_conv layer.
        out_channels (int): number of output channels in out_conv layer.
        with_out_conv (bool): Whether to use out_conv layer
        out_conv_cfg (dict): Config dict for convolution layer, which should
            contain "groups", "kernel_size", "padding", "bias" to build
            out_conv layer.
        out_norm_cfg (dict): Config dict for normalization layer in out_conv.
        out_conv_order (tuple): The order of conv/norm/activation layers in
            out_conv.
        with_input1_conv (bool): Whether to use convolution on input1.
        with_input2_conv (bool): Whether to use convolution on input2.
        input_conv_cfg (dict): Config dict for building input1_conv layer and
            input2_conv layer, which is expected to contain the type of
            convolution.
            Default: None, which means using conv2d.
        input_norm_cfg (dict): Config dict for normalization layer in
            input1_conv and input2_conv layer. Default: None.
        upsample_mode (str): Interpolation method used to resize the output
            of input1_conv and input2_conv to target size. Currently, we
            support ['nearest', 'bilinear']. Default: 'nearest'.
    """

    def __init__(self, fused_channels=256, out_channels=256, with_out_conv=True, out_conv_cfg=dict(groups=1, kernel_size=3, padding=1, bias=True), out_norm_cfg=None, out_conv_order=('act', 'conv', 'norm'), with_input1_conv=False, with_input2_conv=False, input_conv_cfg=None, input_norm_cfg=None, upsample_mode='nearest'):
        super(BaseMergeCell, self).__init__()
        assert (upsample_mode in ['nearest', 'bilinear'])
        self.with_out_conv = with_out_conv
        self.with_input1_conv = with_input1_conv
        self.with_input2_conv = with_input2_conv
        self.upsample_mode = upsample_mode
        if self.with_out_conv:
            self.out_conv = ConvModule(fused_channels, out_channels, **out_conv_cfg, norm_cfg=out_norm_cfg, order=out_conv_order)
        # Identity (empty nn.Sequential) when the input convs are disabled.
        self.input1_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input1_conv else nn.Sequential())
        self.input2_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input2_conv else nn.Sequential())

    def _build_input_conv(self, channel, conv_cfg, norm_cfg):
        # 3x3 conv that keeps the channel count unchanged.
        return ConvModule(channel, channel, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=True)

    @abstractmethod
    def _binary_op(self, x1, x2):
        # Fusion op (e.g. sum / concat); implemented by subclasses.
        pass

    def _resize(self, x, size):
        """Resize ``x`` to spatial ``size`` by interpolation or max-pooling."""
        # NOTE(review): ``x.shape[-2:] < size`` compares tuples
        # lexicographically; for mixed cases (one dim larger, one smaller)
        # the branch is chosen by the first differing dim only — confirm
        # callers always scale both dims in the same direction.
        if (x.shape[(- 2):] == size):
            return x
        elif (x.shape[(- 2):] < size):
            return F.interpolate(x, size=size, mode=self.upsample_mode)
        else:
            # Downsample via max pooling; requires integer scale factors.
            assert (((x.shape[(- 2)] % size[(- 2)]) == 0) and ((x.shape[(- 1)] % size[(- 1)]) == 0))
            kernel_size = (x.shape[(- 1)] // size[(- 1)])
            x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
            return x

    def forward(self, x1, x2, out_size=None):
        assert (x1.shape[:2] == x2.shape[:2])
        assert ((out_size is None) or (len(out_size) == 2))
        if (out_size is None):
            # Fuse at the larger of the two input resolutions.
            out_size = max(x1.size()[2:], x2.size()[2:])
        x1 = self.input1_conv(x1)
        x2 = self.input2_conv(x2)
        x1 = self._resize(x1, out_size)
        x2 = self._resize(x2, out_size)
        x = self._binary_op(x1, x2)
        if self.with_out_conv:
            x = self.out_conv(x)
        return x
|
class SumCell(BaseMergeCell):
    """Merge cell that fuses its two inputs by element-wise addition."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(SumCell, self).__init__(in_channels, out_channels, **kwargs)

    def _binary_op(self, x1, x2):
        fused = x1 + x2
        return fused
|
class ConcatCell(BaseMergeCell):
    """Merge cell that fuses its two inputs by channel-wise concatenation."""

    def __init__(self, in_channels, out_channels, **kwargs):
        # Concatenation doubles the channel count fed to the output conv.
        super(ConcatCell, self).__init__((in_channels * 2), out_channels,
                                         **kwargs)

    def _binary_op(self, x1, x2):
        return torch.cat([x1, x2], dim=1)
|
class GlobalPoolingCell(BaseMergeCell):
    """Merge cell that gates one input by a global attention of the other."""

    def __init__(self, in_channels=None, out_channels=None, **kwargs):
        super().__init__(in_channels, out_channels, **kwargs)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

    def _binary_op(self, x1, x2):
        # Channel-wise attention derived from x2 modulates x1 before summing.
        attention = self.global_pool(x2).sigmoid()
        return x2 + (attention * x1)
|
def min_area_polygons(pointsets):
    """Find the smallest polygons that surround all points in the point sets.

    Args:
        pointsets (Tensor): point sets with shape (N, 18).

    Returns:
        torch.Tensor: Return the smallest polygons with shape (N, 8).
    """
    # Output buffer filled in place by the extension op.
    min_polygons = pointsets.new_zeros((pointsets.size(0), 8))
    ext_module.min_area_polygons(pointsets, min_polygons)
    return min_polygons
|
class ModulatedDeformConv2dFunction(Function):
    """Autograd function for modulated deformable convolution (DCNv2)."""

    @staticmethod
    def symbolic(g, input, offset, mask, weight, bias, stride, padding, dilation, groups, deform_groups):
        # ONNX export: bias is an optional trailing input of the custom op.
        input_tensors = [input, offset, mask, weight]
        if (bias is not None):
            input_tensors.append(bias)
        return g.op('mmcv::MMCVModulatedDeformConv2d', *input_tensors, stride_i=stride, padding_i=padding, dilation_i=dilation, groups_i=groups, deform_groups_i=deform_groups)

    @staticmethod
    def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deform_groups=1):
        if ((input is not None) and (input.dim() != 4)):
            raise ValueError(f'Expected 4D tensor as input, got {input.dim()}D tensor instead.')
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deform_groups = deform_groups
        ctx.with_bias = (bias is not None)
        if (not ctx.with_bias):
            # The extension op always expects a bias tensor; pass an empty one.
            bias = input.new_empty(0)
        # Unify dtypes across input/weight/bias (e.g. under mixed precision).
        input = input.type_as(offset)
        weight = weight.type_as(input)
        bias = bias.type_as(input)
        ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(ModulatedDeformConv2dFunction._output_size(ctx, input, weight))
        # Scratch buffers used internally by the extension op.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        ext_module.modulated_deform_conv_forward(input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, offset, mask, weight, bias) = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        grad_output = grad_output.contiguous()
        ext_module.modulated_deform_conv_backward(input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias)
        if (not ctx.with_bias):
            grad_bias = None
        # One gradient slot per forward argument; non-tensor args get None.
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None)

    @staticmethod
    def _output_size(ctx, input, weight):
        """Compute the (N, C_out, H_out, W_out) shape of the conv output."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range((input.dim() - 2)):
            in_size = input.size((d + 2))
            pad = ctx.padding[d]
            # Effective (dilated) kernel extent along this dimension.
            kernel = ((ctx.dilation[d] * (weight.size((d + 2)) - 1)) + 1)
            stride_ = ctx.stride[d]
            output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
        if (not all(map((lambda s: (s > 0)), output_size))):
            raise ValueError((('convolution input is too small (output would be ' + 'x'.join(map(str, output_size))) + ')'))
        return output_size
|
class ModulatedDeformConv2d(nn.Module):
    """Modulated deformable 2D convolution (DCNv2) layer.

    Takes the feature map together with externally supplied ``offset`` and
    ``mask`` tensors; see :class:`ModulatedDeformConv2dPack` for a variant
    that predicts them internally.
    """

    @deprecated_api_warning({'deformable_groups': 'deform_groups'},
                            cls_name='ModulatedDeformConv2d')
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, deform_groups=1, bias=True):
        super(ModulatedDeformConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deform_groups = deform_groups
        # Kept for interface parity with nn.modules.conv._ConvNd.
        self.transposed = False
        self.output_padding = _single(0)
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups,
                         *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.init_weights()

    def init_weights(self):
        """Uniform fan-in initialisation for weight; zero the bias."""
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        bound = 1.0 / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, offset, mask):
        return modulated_deform_conv2d(x, offset, mask, self.weight,
                                       self.bias, self.stride, self.padding,
                                       self.dilation, self.groups,
                                       self.deform_groups)
|
@CONV_LAYERS.register_module('DCNv2')
class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
    """A ModulatedDeformable Conv Encapsulation that acts as normal Conv
    layers.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int): Same as nn.Conv2d, while tuple is not supported.
        padding (int): Same as nn.Conv2d, while tuple is not supported.
        dilation (int): Same as nn.Conv2d, while tuple is not supported.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
    """
    # Bumped when the state-dict layout changes (see _load_from_state_dict).
    _version = 2

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs)
        # Predicts 2 offset channels + 1 mask channel per kernel position.
        self.conv_offset = nn.Conv2d(self.in_channels, (((self.deform_groups * 3) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation, bias=True)
        self.init_weights()

    def init_weights(self):
        super(ModulatedDeformConv2dPack, self).init_weights()
        # Zero init makes the layer start as a plain (non-deformed) conv.
        if hasattr(self, 'conv_offset'):
            self.conv_offset.weight.data.zero_()
            self.conv_offset.bias.data.zero_()

    def forward(self, x):
        out = self.conv_offset(x)
        (o1, o2, mask) = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        # Modulation scalars are squashed to (0, 1).
        mask = torch.sigmoid(mask)
        return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deform_groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            # Migrate pre-v2 checkpoints: '<name>_offset.*' -> 'conv_offset.*'.
            if (((prefix + 'conv_offset.weight') not in state_dict) and ((prefix[:(- 1)] + '_offset.weight') in state_dict)):
                state_dict[(prefix + 'conv_offset.weight')] = state_dict.pop((prefix[:(- 1)] + '_offset.weight'))
            if (((prefix + 'conv_offset.bias') not in state_dict) and ((prefix[:(- 1)] + '_offset.bias') in state_dict)):
                state_dict[(prefix + 'conv_offset.bias')] = state_dict.pop((prefix[:(- 1)] + '_offset.bias'))
        if ((version is not None) and (version > 1)):
            print_log(f"ModulatedDeformConvPack {prefix.rstrip('.')} is upgraded to version 2.", logger='root')
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
|
def pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold):
    """Group pixels into text instances, which is widely used text detection
    methods.

    Arguments:
        score (np.array or torch.Tensor): The foreground score with size hxw.
        mask (np.array or Tensor): The foreground mask with size hxw.
        embedding (np.array or torch.Tensor): The embedding with size hxwxc to
            distinguish instances.
        kernel_label (np.array or torch.Tensor): The instance kernel index
            with size hxw.
        kernel_contour (np.array or torch.Tensor): The kernel contour with
            size hxw.
        kernel_region_num (int): The instance kernel region number.
        distance_threshold (float): The embedding distance threshold between
            kernel and pixel in one instance.

    Returns:
        list[list[float]]: The instance coordinates and attributes list. Each
        element consists of averaged confidence, pixel number, and
        coordinates (x_i, y_i for all pixels) in order.
    """
    assert isinstance(score, (torch.Tensor, np.ndarray))
    assert isinstance(mask, (torch.Tensor, np.ndarray))
    assert isinstance(embedding, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_contour, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_region_num, int)
    assert isinstance(distance_threshold, float)
    # The extension op consumes torch tensors; convert numpy inputs once.
    if isinstance(score, np.ndarray):
        score = torch.from_numpy(score)
    if isinstance(mask, np.ndarray):
        mask = torch.from_numpy(mask)
    if isinstance(embedding, np.ndarray):
        embedding = torch.from_numpy(embedding)
    if isinstance(kernel_label, np.ndarray):
        kernel_label = torch.from_numpy(kernel_label)
    if isinstance(kernel_contour, np.ndarray):
        kernel_contour = torch.from_numpy(kernel_contour)
    if (torch.__version__ == 'parrots'):
        # parrots returns one flat result: the first kernel_region_num
        # entries are per-instance element counts, followed by the
        # concatenated pixel data of each instance.
        label = ext_module.pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num=kernel_region_num, distance_threshold=distance_threshold)
        label = label.tolist()
        label = label[0]
        list_index = kernel_region_num
        pixel_assignment = []
        for x in range(kernel_region_num):
            # Fix: ``np.float`` was removed in NumPy 1.24; the builtin
            # ``float`` is the identical dtype (float64).
            pixel_assignment.append(np.array(label[list_index:(list_index + int(label[x]))], dtype=float))
            list_index = (list_index + int(label[x]))
    else:
        pixel_assignment = ext_module.pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold)
    return pixel_assignment
|
def points_in_boxes_part(points, boxes):
    """Find the box in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate.
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in
            LiDAR/DEPTH coordinate, (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M). Default background = -1.
    """
    assert points.shape[0] == boxes.shape[0], (
        'Points and boxes should have the same batch size, '
        f'but got {points.shape[0]} and {boxes.shape[0]}')
    assert boxes.shape[2] == 7, (
        f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}')
    assert points.shape[2] == 3, (
        f'points dimension should be 3, but got unexpected shape {points.shape[2]}')
    batch_size, num_points, _ = points.shape
    # -1 marks points that fall inside no box.
    box_idxs_of_pts = points.new_full((batch_size, num_points), -1,
                                      dtype=torch.int)
    device_id = points.get_device()
    assert device_id == boxes.get_device(), \
        'Points and boxes should be put on the same device'
    # Ensure the kernel launches on the tensors' device.
    if torch.cuda.current_device() != device_id:
        torch.cuda.set_device(device_id)
    ext_module.points_in_boxes_part_forward(boxes.contiguous(),
                                            points.contiguous(),
                                            box_idxs_of_pts)
    return box_idxs_of_pts
|
def points_in_boxes_cpu(points, boxes):
    """Find all boxes in which each point is (CPU). The CPU version of
    :meth:`points_in_boxes_all`.

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in
            LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M, T). Default background = 0.
    """
    assert points.shape[0] == boxes.shape[0], (
        'Points and boxes should have the same batch size, '
        f'but got {points.shape[0]} and {boxes.shape[0]}')
    assert boxes.shape[2] == 7, (
        f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}')
    assert points.shape[2] == 3, (
        f'points dimension should be 3, but got unexpected shape {points.shape[2]}')
    batch_size, num_points, _ = points.shape
    num_boxes = boxes.shape[1]
    # Per-sample indicator tensor filled in place by the C++ op.
    point_indices = points.new_zeros((batch_size, num_boxes, num_points),
                                     dtype=torch.int)
    for sample_idx in range(batch_size):
        ext_module.points_in_boxes_cpu_forward(
            boxes[sample_idx].float().contiguous(),
            points[sample_idx].float().contiguous(),
            point_indices[sample_idx])
    # (B, T, M) -> (B, M, T)
    return point_indices.transpose(1, 2)
|
def points_in_boxes_all(points, boxes):
    """Find all boxes in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M, T). Default background = 0.
    """
    # Fix: the message previously interpolated boxes.shape[0] twice, so a
    # mismatch reported two identical numbers instead of both batch sizes.
    assert (boxes.shape[0] == points.shape[0]), f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert (boxes.shape[2] == 7), f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert (points.shape[2] == 3), f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    (batch_size, num_points, _) = points.shape
    num_boxes = boxes.shape[1]
    # One indicator per (point, box) pair; 0 means "not inside".
    # (new_zeros already fills with 0; the old .fill_(0) was redundant.)
    box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes), dtype=torch.int)
    points_device = points.get_device()
    assert (points_device == boxes.get_device()), 'Points and boxes should be put on the same device'
    # Ensure the kernel launches on the tensors' device.
    if (torch.cuda.current_device() != points_device):
        torch.cuda.set_device(points_device)
    ext_module.points_in_boxes_all_forward(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)
    return box_idxs_of_pts
|
def points_in_polygons(points, polygons):
    """Judging whether points are inside polygons, which is used in the ATSS
    assignment for the rotated boxes.

    It should be noted that when the point is just at the polygon boundary,
    the judgment will be inaccurate, but the effect on assignment is limited.

    Args:
        points (torch.Tensor): It has shape (B, 2), indicating (x, y).
            B means the number of predicted points.
        polygons (torch.Tensor): It has shape (M, 8), indicating
            (x1, y1, x2, y2, x3, y3, x4, y4). M means the number of
            ground truth polygons.

    Returns:
        torch.Tensor: Return the result with the shape of (B, M),
        1 indicates that the point is inside the polygon,
        0 indicates that the point is outside the polygon.
    """
    assert points.shape[1] == 2, (
        f'points dimension should be 2, but got unexpected shape {points.shape[1]}')
    assert polygons.shape[1] == 8, (
        f'polygons dimension should be 8, but got unexpected shape {polygons.shape[1]}')
    # Zero-initialised (B, M) result, filled in place by the extension op.
    result = torch.zeros((points.shape[0], polygons.shape[0])).cuda().float()
    ext_module.points_in_polygons_forward(points.contiguous(),
                                          polygons.contiguous(), result)
    return result
|
class PSAMaskFunction(Function):
    """Autograd function for the PSANet point-wise spatial attention mask."""

    @staticmethod
    def symbolic(g, input, psa_type, mask_size):
        # ONNX export via the mmcv custom op.
        return g.op('mmcv::MMCVPSAMask', input, psa_type_i=psa_type, mask_size_i=mask_size)

    @staticmethod
    def forward(ctx, input, psa_type, mask_size):
        ctx.psa_type = psa_type
        ctx.mask_size = _pair(mask_size)
        ctx.save_for_backward(input)
        (h_mask, w_mask) = ctx.mask_size
        (batch_size, channels, h_feature, w_feature) = input.size()
        # Each input channel corresponds to one position of the h x w mask.
        assert (channels == (h_mask * w_mask))
        output = input.new_zeros((batch_size, (h_feature * w_feature), h_feature, w_feature))
        ext_module.psamask_forward(input, output, psa_type=psa_type, num_=batch_size, h_feature=h_feature, w_feature=w_feature, h_mask=h_mask, w_mask=w_mask, half_h_mask=((h_mask - 1) // 2), half_w_mask=((w_mask - 1) // 2))
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input = ctx.saved_tensors[0]
        psa_type = ctx.psa_type
        (h_mask, w_mask) = ctx.mask_size
        (batch_size, channels, h_feature, w_feature) = input.size()
        grad_input = grad_output.new_zeros((batch_size, channels, h_feature, w_feature))
        ext_module.psamask_backward(grad_output, grad_input, psa_type=psa_type, num_=batch_size, h_feature=h_feature, w_feature=w_feature, h_mask=h_mask, w_mask=w_mask, half_h_mask=((h_mask - 1) // 2), half_w_mask=((w_mask - 1) // 2))
        # psa_type and mask_size are non-tensor args -> no gradients.
        return (grad_input, None, None, None)
|
class PSAMask(nn.Module):
    """Point-wise spatial attention mask module (PSANet).

    Args:
        psa_type (str): Either ``'collect'`` or ``'distribute'``.
        mask_size (tuple, optional): (h, w) of the attention mask.
            Default: None.
    """

    def __init__(self, psa_type, mask_size=None):
        super(PSAMask, self).__init__()
        assert psa_type in ['collect', 'distribute']
        # The extension op identifies the mode by an integer enum.
        self.psa_type_enum = 0 if psa_type == 'collect' else 1
        self.psa_type = psa_type
        self.mask_size = mask_size

    def forward(self, input):
        return psa_mask(input, self.psa_type_enum, self.mask_size)

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(psa_type={self.psa_type}, '
                f'mask_size={self.mask_size})')
|
class RiRoIAlignRotatedFunction(Function):
    """Autograd function for rotation-invariant rotated RoI align."""

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, num_samples=0, num_orientations=8, clockwise=False):
        # Accept either a single int or an (h, w) tuple for the output size.
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif is_tuple_of(out_size, int):
            assert (len(out_size) == 2)
            (out_h, out_w) = out_size
        else:
            raise TypeError(f'"out_size" should be an integer or tuple of integers, but got {out_size}')
        ctx.spatial_scale = spatial_scale
        ctx.num_samples = num_samples
        ctx.num_orientations = num_orientations
        ctx.clockwise = clockwise
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        (batch_size, num_channels, _, _) = features.size()
        num_rois = rois.size(0)
        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
        ext_module.riroi_align_rotated_forward(features, rois, output, pooled_height=out_h, pooled_width=out_w, spatial_scale=spatial_scale, num_samples=num_samples, num_orientations=num_orientations, clockwise=clockwise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        num_orientations = ctx.num_orientations
        clockwise = ctx.clockwise
        num_samples = ctx.num_samples
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        (batch_size, num_channels, feature_h, feature_w) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        # Only the feature map is differentiable; rois get no gradient.
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, feature_h, feature_w)
            ext_module.riroi_align_rotated_backward(grad_output.contiguous(), rois, grad_input, pooled_height=out_h, pooled_width=out_w, spatial_scale=spatial_scale, num_samples=num_samples, num_orientations=num_orientations, clockwise=clockwise)
        return (grad_input, grad_rois, None, None, None, None, None)
|
class RiRoIAlignRotated(nn.Module):
    """Rotation-invariant RoI align pooling layer for rotated proposals.

    It accepts a feature map of shape (N, C, H, W) and rois with shape
    (n, 6) with each roi decoded as (batch_index, center_x, center_y,
    w, h, angle). The angle is in radian.

    The details are described in the paper `ReDet: A Rotation-equivariant
    Detector for Aerial Object Detection
    <https://arxiv.org/abs/2103.07733>`_.

    Args:
        out_size (tuple): fixed dimensional RoI output with shape (h, w).
        spatial_scale (float): scale the input boxes by this number
        num_samples (int): number of inputs samples to take for each
            output sample. 0 to take samples densely for current models.
        num_orientations (int): number of oriented channels.
        clockwise (bool): If True, the angle in each proposal follows a
            clockwise fashion in image space, otherwise, the angle is
            counterclockwise. Default: False.
    """

    def __init__(self, out_size, spatial_scale, num_samples=0,
                 num_orientations=8, clockwise=False):
        super(RiRoIAlignRotated, self).__init__()
        self.out_size = out_size
        # Normalise numeric hyper-parameters to the expected types.
        self.spatial_scale = float(spatial_scale)
        self.num_samples = int(num_samples)
        self.num_orientations = int(num_orientations)
        self.clockwise = clockwise

    def forward(self, features, rois):
        return RiRoIAlignRotatedFunction.apply(features, rois, self.out_size,
                                               self.spatial_scale,
                                               self.num_samples,
                                               self.num_orientations,
                                               self.clockwise)
|
class RoIAlignFunction(Function):
@staticmethod
def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, pool_mode, aligned):
    """ONNX export: emit the mmcv custom op when its runtime is loaded,
    otherwise approximate with the standard ONNX ``RoiAlign`` op."""
    from ..onnx import is_custom_op_loaded
    has_custom_op = is_custom_op_loaded()
    if has_custom_op:
        return g.op('mmcv::MMCVRoiAlign', input, rois, output_height_i=output_size[0], output_width_i=output_size[1], spatial_scale_f=spatial_scale, sampling_ratio_i=sampling_ratio, mode_s=pool_mode, aligned_i=aligned)
    else:
        from torch.onnx import TensorProtoDataType
        from torch.onnx.symbolic_helper import _slice_helper
        from torch.onnx.symbolic_opset9 import squeeze, sub
        # Split rois (n, 5) into batch indices (col 0) and boxes (cols 1-4).
        batch_indices = _slice_helper(g, rois, axes=[1], starts=[0], ends=[1])
        batch_indices = squeeze(g, batch_indices, 1)
        batch_indices = g.op('Cast', batch_indices, to_i=TensorProtoDataType.INT64)
        rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
        if aligned:
            # ONNX RoiAlign has no 'aligned' flag; emulate it by shifting
            # the boxes by half a pixel of the input coordinate frame.
            aligned_offset = g.op('Constant', value_t=torch.tensor([(0.5 / spatial_scale)], dtype=torch.float32))
            rois = sub(g, rois, aligned_offset)
        # ONNX requires a non-negative sampling_ratio.
        return g.op('RoiAlign', input, rois, batch_indices, output_height_i=output_size[0], output_width_i=output_size[1], spatial_scale_f=spatial_scale, sampling_ratio_i=max(0, sampling_ratio), mode_s=pool_mode)
@staticmethod
def forward(ctx, input, rois, output_size, spatial_scale=1.0, sampling_ratio=0, pool_mode='avg', aligned=True):
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
assert (pool_mode in ('max', 'avg'))
ctx.pool_mode = (0 if (pool_mode == 'max') else 1)
ctx.aligned = aligned
ctx.input_shape = input.size()
assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1])
output = input.new_zeros(output_shape)
if (ctx.pool_mode == 0):
argmax_y = input.new_zeros(output_shape)
argmax_x = input.new_zeros(output_shape)
else:
argmax_y = input.new_zeros(0)
argmax_x = input.new_zeros(0)
ext_module.roi_align_forward(input, rois, output, argmax_y, argmax_x, aligned_height=ctx.output_size[0], aligned_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, pool_mode=ctx.pool_mode, aligned=ctx.aligned)
ctx.save_for_backward(rois, argmax_y, argmax_x)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(rois, argmax_y, argmax_x) = ctx.saved_tensors
grad_input = grad_output.new_zeros(ctx.input_shape)
grad_output = grad_output.contiguous()
ext_module.roi_align_backward(grad_output, rois, argmax_y, argmax_x, grad_input, aligned_height=ctx.output_size[0], aligned_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, pool_mode=ctx.pool_mode, aligned=ctx.aligned)
return (grad_input, None, None, None, None, None, None)
|
class RoIAlign(nn.Module):
    """RoI align pooling layer.

    Args:
        output_size (tuple): h, w
        spatial_scale (float): scale the input boxes by this number
        sampling_ratio (int): number of inputs samples to take for each
            output sample. 0 to take samples densely for current models.
        pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
        aligned (bool): if False, use the legacy implementation in
            MMDetection. If True, align the results more perfectly.
        use_torchvision (bool): whether to use roi_align from torchvision.

    Note:
        The implementation of RoIAlign when aligned=True is modified from
        https://github.com/facebookresearch/detectron2/

        The meaning of aligned=True:

        Given a continuous coordinate c, its two neighboring pixel
        indices (in our pixel model) are computed by floor(c - 0.5) and
        ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
        indices [0] and [1] (which are sampled from the underlying signal
        at continuous coordinates 0.5 and 1.5). But the original roi_align
        (aligned=False) does not subtract the 0.5 when computing
        neighboring pixel indices and therefore it uses pixels with a
        slightly incorrect alignment (relative to our pixel model) when
        performing bilinear interpolation.

        With `aligned=True`,
        we first appropriately scale the ROI and then shift it by -0.5
        prior to calling roi_align. This produces the correct neighbors;

        The difference does not make a difference to the model's
        performance if ROIAlign is used together with conv layers.
    """

    @deprecated_api_warning({'out_size': 'output_size', 'sample_num': 'sampling_ratio'}, cls_name='RoIAlign')
    def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0, pool_mode='avg', aligned=True, use_torchvision=False):
        super(RoIAlign, self).__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.pool_mode = pool_mode
        self.aligned = aligned
        self.use_torchvision = use_torchvision

    def forward(self, input, rois):
        """
        Args:
            input: NCHW images
            rois: Bx5 boxes. First column is the index into N. The other 4
                columns are xyxy.
        """
        if self.use_torchvision:
            from torchvision.ops import roi_align as tv_roi_align
            if ('aligned' in tv_roi_align.__code__.co_varnames):
                return tv_roi_align(input, rois, self.output_size,
                                    self.spatial_scale, self.sampling_ratio,
                                    self.aligned)
            else:
                if self.aligned:
                    # Bugfix: shift boxes out-of-place so the caller's
                    # ``rois`` tensor is not silently mutated (the previous
                    # in-place ``rois -= ...`` altered the caller's data).
                    rois = rois - rois.new_tensor(
                        [0.0] + [0.5 / self.spatial_scale] * 4)
                return tv_roi_align(input, rois, self.output_size,
                                    self.spatial_scale, self.sampling_ratio)
        else:
            return roi_align(input, rois, self.output_size,
                             self.spatial_scale, self.sampling_ratio,
                             self.pool_mode, self.aligned)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(output_size={self.output_size}, '
        s += f'spatial_scale={self.spatial_scale}, '
        s += f'sampling_ratio={self.sampling_ratio}, '
        s += f'pool_mode={self.pool_mode}, '
        s += f'aligned={self.aligned}, '
        s += f'use_torchvision={self.use_torchvision})'
        return s
|
class RoIAlignRotatedFunction(Function):
    """Autograd function for rotated RoI align (see :class:`RoIAlignRotated`)."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, aligned, clockwise):
        """Export the op to ONNX as the mmcv custom op.

        Raises:
            TypeError: If ``output_size`` is neither an int nor a 2-tuple of
                ints.
        """
        if isinstance(output_size, int):
            out_h = output_size
            out_w = output_size
        elif isinstance(output_size, tuple):
            assert len(output_size) == 2
            assert isinstance(output_size[0], int)
            assert isinstance(output_size[1], int)
            out_h, out_w = output_size
        else:
            raise TypeError('"output_size" must be an integer or tuple of integers')
        # Bugfix: the exported width was previously ``out_h``, breaking
        # non-square output sizes; it is now correctly ``out_w``.
        return g.op(
            'mmcv::MMCVRoIAlignRotated',
            input,
            rois,
            output_height_i=out_h,
            output_width_i=out_w,
            spatial_scale_f=spatial_scale,
            sampling_ratio_i=sampling_ratio,
            aligned_i=aligned,
            clockwise_i=clockwise)

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale, sampling_ratio=0, aligned=True, clockwise=False):
        """Run the extension forward kernel and stash state for backward."""
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.aligned = aligned
        ctx.clockwise = clockwise
        ctx.save_for_backward(rois)
        ctx.feature_size = input.size()
        (batch_size, num_channels, data_height, data_width) = input.size()
        num_rois = rois.size(0)
        output = input.new_zeros(num_rois, num_channels, ctx.output_size[0], ctx.output_size[1])
        ext_module.roi_align_rotated_forward(
            input,
            rois,
            output,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            aligned=ctx.aligned,
            clockwise=ctx.clockwise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Backprop through the rotated RoI align kernel.

        Only ``input`` receives a gradient; rois and hyper-parameters get
        ``None``.
        """
        feature_size = ctx.feature_size
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height, data_width)
            ext_module.roi_align_rotated_backward(
                grad_output.contiguous(),
                rois,
                grad_input,
                pooled_height=out_h,
                pooled_width=out_w,
                spatial_scale=ctx.spatial_scale,
                sampling_ratio=ctx.sampling_ratio,
                aligned=ctx.aligned,
                clockwise=ctx.clockwise)
        return (grad_input, grad_rois, None, None, None, None, None)
|
class RoIAlignRotated(nn.Module):
    """RoI align pooling layer for rotated proposals.

    Accepts a feature map of shape (N, C, H, W) and rois of shape (n, 6),
    each roi decoded as (batch_index, center_x, center_y, w, h, angle) with
    the angle in radian.

    Args:
        output_size (tuple): h, w
        spatial_scale (float): scale the input boxes by this number.
        sampling_ratio (int): number of inputs samples to take for each
            output sample. 0 to take samples densely for current models.
        aligned (bool): if False, use the legacy implementation in
            MMDetection. If True, align the results more perfectly
            (boxes are shifted by -0.5 before sampling, matching the
            detectron2 pixel model). Default: True.
        clockwise (bool): If True, the angle in each proposal follows a
            clockwise fashion in image space, otherwise, the angle is
            counterclockwise. Default: False.
    """

    @deprecated_api_warning({'out_size': 'output_size', 'sample_num': 'sampling_ratio'}, cls_name='RoIAlignRotated')
    def __init__(self, output_size, spatial_scale, sampling_ratio=0, aligned=True, clockwise=False):
        super().__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.aligned = aligned
        self.clockwise = clockwise

    def forward(self, input, rois):
        """Pool ``input`` over the rotated boxes in ``rois``."""
        return RoIAlignRotatedFunction.apply(input, rois, self.output_size,
                                             self.spatial_scale,
                                             self.sampling_ratio,
                                             self.aligned, self.clockwise)

    def __repr__(self):
        attrs = [
            f'output_size={self.output_size}',
            f'spatial_scale={self.spatial_scale}',
            f'sampling_ratio={self.sampling_ratio}',
            f'aligned={self.aligned}',
            f'clockwise={self.clockwise}',
        ]
        return f"{self.__class__.__name__}({', '.join(attrs)})"
|
class RoIPoolFunction(Function):
    """Autograd function behind :class:`RoIPool` / ``roi_pool``."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale):
        """Export as the standard ONNX ``MaxRoiPool`` op."""
        return g.op(
            'MaxRoiPool',
            input,
            rois,
            pooled_shape_i=output_size,
            spatial_scale_f=spatial_scale)

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale=1.0):
        """Run the RoI max-pool kernel and stash state for backward."""
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
        out_shape = (rois.size(0), input.size(1), ctx.output_size[0],
                     ctx.output_size[1])
        pooled = input.new_zeros(out_shape)
        # Argmax indices of each pooled cell, needed to route gradients.
        argmax = input.new_zeros(out_shape, dtype=torch.int)
        ext_module.roi_pool_forward(
            input,
            rois,
            pooled,
            argmax,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale)
        ctx.save_for_backward(rois, argmax)
        return pooled

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        rois, argmax = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        ext_module.roi_pool_backward(
            grad_output,
            rois,
            argmax,
            grad_input,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale)
        # Only ``input`` receives a gradient.
        return grad_input, None, None, None
|
class RoIPool(nn.Module):
    """RoI pooling layer (thin module wrapper around ``roi_pool``)."""

    def __init__(self, output_size, spatial_scale=1.0):
        super().__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)

    def forward(self, input, rois):
        return roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        cls = self.__class__.__name__
        return (f'{cls}(output_size={self.output_size}, '
                f'spatial_scale={self.spatial_scale})')
|
class RoIAwarePool3d(nn.Module):
    """Encode the geometry-specific features of each 3D proposal.

    Please refer to `PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_ for more
    details.

    Args:
        out_size (int or tuple): The size of output features. n or
            [n1, n2, n3].
        max_pts_per_voxel (int, optional): The maximum number of points per
            voxel. Default: 128.
        mode (str, optional): Pooling method of RoIAware, 'max' or 'avg'.
            Default: 'max'.
    """

    def __init__(self, out_size, max_pts_per_voxel=128, mode='max'):
        super().__init__()
        self.out_size = out_size
        self.max_pts_per_voxel = max_pts_per_voxel
        assert mode in ['max', 'avg']
        # Map the string mode to the integer code expected by the extension.
        self.mode = {'max': 0, 'avg': 1}[mode]

    def forward(self, rois, pts, pts_feature):
        """
        Args:
            rois (torch.Tensor): [N, 7], in LiDAR coordinate,
                (x, y, z) is the bottom center of rois.
            pts (torch.Tensor): [npoints, 3], coordinates of input points.
            pts_feature (torch.Tensor): [npoints, C], features of input
                points.

        Returns:
            torch.Tensor: Pooled features whose shape is
            [N, out_x, out_y, out_z, C].
        """
        return RoIAwarePool3dFunction.apply(rois, pts, pts_feature,
                                            self.out_size,
                                            self.max_pts_per_voxel, self.mode)
|
class RoIAwarePool3dFunction(Function):
    """Autograd function behind :class:`RoIAwarePool3d`."""

    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, mode):
        """
        Args:
            rois (torch.Tensor): [N, 7], in LiDAR coordinate,
                (x, y, z) is the bottom center of rois.
            pts (torch.Tensor): [npoints, 3], coordinates of input points.
            pts_feature (torch.Tensor): [npoints, C], features of input
                points.
            out_size (int or tuple): The size of output features. n or
                [n1, n2, n3].
            max_pts_per_voxel (int): The maximum number of points per voxel.
                Default: 128.
            mode (int): Pooling method of RoIAware, 0 (max pool) or
                1 (average pool).

        Returns:
            torch.Tensor: Pooled features whose shape is
            [N, out_x, out_y, out_z, C].
        """
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert len(out_size) == 3
            assert mmcv.is_tuple_of(out_size, int)
            out_x, out_y, out_z = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[-1]
        num_pts = pts.shape[0]
        voxel_grid = (num_rois, out_x, out_y, out_z)
        pooled_features = pts_feature.new_zeros(voxel_grid + (num_channels,))
        argmax = pts_feature.new_zeros(voxel_grid + (num_channels,),
                                       dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros(
            voxel_grid + (max_pts_per_voxel,), dtype=torch.int)
        ext_module.roiaware_pool3d_forward(rois, pts, pts_feature, argmax,
                                           pts_idx_of_voxels, pooled_features,
                                           pool_method=mode)
        # Plain attribute (not save_for_backward): holds int tensors and
        # python ints only, exactly what backward needs.
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode,
                                            num_pts, num_channels)
        return pooled_features

    @staticmethod
    def backward(ctx, grad_out):
        stashed = ctx.roiaware_pool3d_for_backward
        pts_idx_of_voxels, argmax, mode, num_pts, num_channels = stashed
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        ext_module.roiaware_pool3d_backward(pts_idx_of_voxels, argmax,
                                            grad_out.contiguous(), grad_in,
                                            pool_method=mode)
        # Only pts_feature receives a gradient.
        return None, None, grad_in, None, None, None
|
class RoIPointPool3d(nn.Module):
    """Encode the geometry-specific features of each 3D proposal.

    Please refer to `Paper of PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_
    for more details.

    Args:
        num_sampled_points (int, optional): Number of samples in each roi.
            Default: 512.
    """

    def __init__(self, num_sampled_points=512):
        super().__init__()
        self.num_sampled_points = num_sampled_points

    def forward(self, points, point_features, boxes3d):
        """
        Args:
            points (torch.Tensor): Input points whose shape is (B, N, C).
            point_features (torch.Tensor): Features of input points whose
                shape is (B, N, C).
            boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7).

        Returns:
            tuple[torch.Tensor]: A tuple contains two elements. The first one
            is the pooled features whose shape is (B, M, 512, 3 + C). The
            second is an empty flag whose shape is (B, M).
        """
        return RoIPointPool3dFunction.apply(points, point_features, boxes3d,
                                            self.num_sampled_points)
|
class RoIPointPool3dFunction(Function):
    """Autograd function behind :class:`RoIPointPool3d` (forward only)."""

    @staticmethod
    def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
        """
        Args:
            points (torch.Tensor): Input points whose shape is (B, N, C).
            point_features (torch.Tensor): Features of input points whose
                shape is (B, N, C).
            boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7).
            num_sampled_points (int, optional): The num of sampled points.
                Default: 512.

        Returns:
            tuple[torch.Tensor]: A tuple contains two elements. The first one
            is the pooled features whose shape is (B, M, 512, 3 + C). The
            second is an empty flag whose shape is (B, M).
        """
        assert len(points.shape) == 3 and points.shape[2] == 3
        batch_size = points.shape[0]
        boxes_num = boxes3d.shape[1]
        feature_len = point_features.shape[2]
        pooled_boxes3d = boxes3d.view(batch_size, -1, 7)
        pooled_features = point_features.new_zeros(
            (batch_size, boxes_num, num_sampled_points, 3 + feature_len))
        pooled_empty_flag = point_features.new_zeros(
            (batch_size, boxes_num)).int()
        ext_module.roipoint_pool3d_forward(points.contiguous(),
                                           pooled_boxes3d.contiguous(),
                                           point_features.contiguous(),
                                           pooled_features, pooled_empty_flag)
        return pooled_features, pooled_empty_flag

    @staticmethod
    def backward(ctx, grad_out):
        # This op does not support backpropagation.
        raise NotImplementedError
|
class RotatedFeatureAlignFunction(Function):
    """Feature interpolation at the positions of refined rotated anchors,
    reconstructing the feature map pixel-wise to achieve feature alignment.

    Described in `R3Det: Refined Single-Stage Detector with Feature
    Refinement for Rotating Object <https://arxiv.org/abs/1908.05612>`_.
    """

    @staticmethod
    def forward(ctx, features, best_rbboxes, spatial_scale, points):
        """
        Args:
            features (torch.Tensor): Input features with shape [N,C,H,W].
            best_rbboxes (torch.Tensor): Refined rotate anchors with
                shape [N,H,W,5]. Coordinate format (cx,cx,h,w,a).
            spatial_scale (float): The scale of feature map size and
                input image size.
            points (int, optional): The number of sample points.
                Only 1 and 5 are supported. Defaults to 1.

        Returns:
            torch.Tensor: Refined features with shape [N,C,H,W].
        """
        ctx.spatial_scale = spatial_scale
        ctx.points = points
        ctx.save_for_backward(best_rbboxes)
        assert points in [1, 5]
        refined = torch.zeros_like(features)
        ext_module.rotated_feature_align_forward(features, best_rbboxes,
                                                 refined,
                                                 spatial_scale=spatial_scale,
                                                 points=points)
        return refined

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """
        Args:
            grad_output (torch.Tensor): The gradiant of output features
                with shape [N,C,H,W].

        Returns:
            torch.Tensor: The gradiant of input features with shape
            [N,C,H,W].
        """
        (best_rbboxes,) = ctx.saved_tensors
        if not ctx.needs_input_grad[0]:
            # Only ``features`` can receive a gradient.
            return None, None, None, None
        grad_input = torch.zeros_like(grad_output)
        ext_module.rotated_feature_align_backward(
            grad_output.contiguous(), best_rbboxes, grad_input,
            spatial_scale=ctx.spatial_scale, points=ctx.points)
        return grad_input, None, None, None
|
def rotated_feature_align(features, best_rbboxes, spatial_scale=(1 / 8), points=1):
    """Functional interface to :class:`RotatedFeatureAlignFunction`.

    Args:
        features (torch.Tensor): Input features with shape [N,C,H,W].
        best_rbboxes (torch.Tensor): Refined rotate anchors with
            shape [N,H,W,5]. Coordinate format (cx,cx,h,w,a).
        spatial_scale (float): The scale of feature map size and input image
            size. Defaults to 1/8.
        points (int, optional): The number of sample points. Only 1 and 5 are
            supported. Defaults to 1.

    Returns:
        torch.Tensor: Refined features with shape [N,C,H,W].
    """
    return RotatedFeatureAlignFunction.apply(features, best_rbboxes, spatial_scale, points)
|
@CONV_LAYERS.register_module(name='SAC')
class SAConv2d(ConvAWS2d):
    """SAC (Switchable Atrous Convolution)

    This is an implementation of `DetectoRS: Detecting Objects with Recursive
    Feature Pyramid and Switchable Atrous Convolution
    <https://arxiv.org/abs/2006.02334>`_.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
        use_deform: If ``True``, replace convolution with deformable
            convolution. Default: ``False``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, use_deform=False):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.use_deform = use_deform
        # 1x1 conv producing the per-location switch between the small
        # (dilation d) and large (dilation 3d) atrous branches.
        self.switch = nn.Conv2d(self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
        # Learnable weight delta added only on the large-dilation branch.
        self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
        # Global-context 1x1 convs applied before and after the SAC body.
        self.pre_context = nn.Conv2d(self.in_channels, self.in_channels, kernel_size=1, bias=True)
        self.post_context = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=1, bias=True)
        if self.use_deform:
            # 18 = 2 offsets per sample * 3 * 3 kernel locations.
            self.offset_s = nn.Conv2d(self.in_channels, 18, kernel_size=3, padding=1, stride=stride, bias=True)
            self.offset_l = nn.Conv2d(self.in_channels, 18, kernel_size=3, padding=1, stride=stride, bias=True)
        self.init_weights()

    def init_weights(self):
        # Zero weights everywhere; the switch bias of 1 makes the small
        # branch dominate initially, contexts/offsets start as no-ops.
        constant_init(self.switch, 0, bias=1)
        self.weight_diff.data.zero_()
        constant_init(self.pre_context, 0)
        constant_init(self.post_context, 0)
        if self.use_deform:
            constant_init(self.offset_s, 0)
            constant_init(self.offset_l, 0)

    def forward(self, x):
        # Pre-context: add globally average-pooled features back onto x.
        avg_x = F.adaptive_avg_pool2d(x, output_size=1)
        avg_x = self.pre_context(avg_x)
        avg_x = avg_x.expand_as(x)
        x = (x + avg_x)
        # Reflect-padded 5x5 average pooling feeds the switch and offsets.
        avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
        avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
        switch = self.switch(avg_x)
        weight = self._get_weight(self.weight)
        zero_bias = torch.zeros(self.out_channels, device=weight.device, dtype=weight.dtype)
        # Small-dilation branch. The super() call differs by torch version:
        # conv2d_forward (<1.5 / parrots), 3-arg _conv_forward (>=1.8),
        # 2-arg _conv_forward otherwise.
        if self.use_deform:
            offset = self.offset_s(avg_x)
            out_s = deform_conv2d(x, offset, weight, self.stride, self.padding, self.dilation, self.groups, 1)
        elif ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.5.0'))):
            out_s = super().conv2d_forward(x, weight)
        elif (digit_version(TORCH_VERSION) >= digit_version('1.8.0')):
            out_s = super()._conv_forward(x, weight, zero_bias)
        else:
            out_s = super()._conv_forward(x, weight)
        # Large-dilation branch: temporarily triple padding/dilation and add
        # the weight delta; original values are restored after the branch.
        ori_p = self.padding
        ori_d = self.dilation
        self.padding = tuple(((3 * p) for p in self.padding))
        self.dilation = tuple(((3 * d) for d in self.dilation))
        weight = (weight + self.weight_diff)
        if self.use_deform:
            offset = self.offset_l(avg_x)
            out_l = deform_conv2d(x, offset, weight, self.stride, self.padding, self.dilation, self.groups, 1)
        elif ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.5.0'))):
            out_l = super().conv2d_forward(x, weight)
        elif (digit_version(TORCH_VERSION) >= digit_version('1.8.0')):
            out_l = super()._conv_forward(x, weight, zero_bias)
        else:
            out_l = super()._conv_forward(x, weight)
        # Per-location soft blend of the two branches.
        out = ((switch * out_s) + ((1 - switch) * out_l))
        self.padding = ori_p
        self.dilation = ori_d
        # Post-context: add global context of the output back on.
        avg_x = F.adaptive_avg_pool2d(out, output_size=1)
        avg_x = self.post_context(avg_x)
        avg_x = avg_x.expand_as(out)
        out = (out + avg_x)
        return out
|
def _calculate_fan_in_and_fan_out_hwio(tensor):
dimensions = tensor.ndimension()
if (dimensions < 2):
raise ValueError('fan in and fan out can not be computed for tensorwith fewer than 2 dimensions')
if (dimensions == 2):
fan_in = tensor.size((- 2))
fan_out = tensor.size((- 1))
else:
num_input_fmaps = tensor.size((- 2))
num_output_fmaps = tensor.size((- 1))
receptive_field_size = 1
if (tensor.dim() > 2):
receptive_field_size = tensor[(..., 0, 0)].numel()
fan_in = (num_input_fmaps * receptive_field_size)
fan_out = (num_output_fmaps * receptive_field_size)
return (fan_in, fan_out)
|
class SparseConvolution(SparseModule):
    """N-dimensional sparse convolution base class.

    Regular, submanifold (``subm``), transposed and inverse variants are all
    expressed through constructor flags; the thin subclasses below only fix
    ``ndim`` and these flags.

    NOTE(review): computed indice pairs are cached in
    ``input.indice_dict[indice_key]`` and reused across layers sharing the
    same key — an inverse conv relies on the pairs cached by the conv it
    inverts. Presumably this mirrors the spconv library; verify against its
    documentation.
    """

    def __init__(self, ndim, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=True, subm=False, output_padding=0, transposed=False, inverse=False, indice_key=None, fused_bn=False):
        super(SparseConvolution, self).__init__()
        assert (groups == 1)
        # Broadcast scalar geometry arguments to one value per spatial dim.
        if (not isinstance(kernel_size, (list, tuple))):
            kernel_size = ([kernel_size] * ndim)
        if (not isinstance(stride, (list, tuple))):
            stride = ([stride] * ndim)
        if (not isinstance(padding, (list, tuple))):
            padding = ([padding] * ndim)
        if (not isinstance(dilation, (list, tuple))):
            dilation = ([dilation] * ndim)
        if (not isinstance(output_padding, (list, tuple))):
            output_padding = ([output_padding] * ndim)
        # Simultaneous stride > 1 and dilation > 1 is unsupported.
        for (d, s) in zip(dilation, stride):
            assert any([(s == 1), (d == 1)]), "don't support this."
        self.ndim = ndim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # All-ones kernel: forward() reduces to a dense matmul shortcut.
        self.conv1x1 = (np.prod(kernel_size) == 1)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.inverse = inverse
        self.output_padding = output_padding
        self.groups = groups
        self.subm = subm
        self.indice_key = indice_key
        self.fused_bn = fused_bn
        # Weight layout: (*kernel_size, in_channels, out_channels) — see
        # _calculate_fan_in_and_fan_out_hwio.
        self.weight = Parameter(torch.Tensor(*kernel_size, in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming init; the bias bound uses the fan_in of the trailing
        # (in, out) weight layout.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if (self.bias is not None):
            (fan_in, _) = _calculate_fan_in_and_fan_out_hwio(self.weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(self.bias, (- bound), bound)

    def forward(self, input):
        """Convolve a ``SparseConvTensor``; returns a new sparse tensor with
        ``indice_dict`` and ``grid`` propagated from the input."""
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        if (not self.subm):
            # Regular/transposed convs change the spatial shape; submanifold
            # convs keep it.
            if self.transposed:
                out_spatial_shape = ops.get_deconv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding)
            else:
                out_spatial_shape = ops.get_conv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation)
        else:
            out_spatial_shape = spatial_shape
        if self.conv1x1:
            # 1x1 kernel: plain matmul on the features, indices unchanged.
            features = torch.mm(input.features, self.weight.view(self.in_channels, self.out_channels))
            if (self.bias is not None):
                features += self.bias
            out_tensor = SparseConvTensor(features, input.indices, input.spatial_shape, input.batch_size)
            out_tensor.indice_dict = input.indice_dict
            out_tensor.grid = input.grid
            return out_tensor
        # Reuse cached indice pairs when available; otherwise compute and
        # cache them under this layer's indice_key.
        data = input.find_indice_pair(self.indice_key)
        if self.inverse:
            assert ((data is not None) and (self.indice_key is not None))
            (_, outids, indice_pairs, indice_pair_num, out_spatial_shape) = data
            assert (indice_pairs.shape[0] == np.prod(self.kernel_size)), 'inverse conv must have same kernel size as its couple conv'
        elif ((self.indice_key is not None) and (data is not None)):
            (outids, _, indice_pairs, indice_pair_num, _) = data
        else:
            (outids, indice_pairs, indice_pair_num) = ops.get_indice_pairs(indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding, self.subm, self.transposed, grid=input.grid)
            input.indice_dict[self.indice_key] = (outids, indices, indice_pairs, indice_pair_num, spatial_shape)
        if self.fused_bn:
            # Fused conv+BN path consumes the (folded) bias inside the op.
            assert (self.bias is not None)
            out_features = ops.fused_indice_conv(features, self.weight, self.bias, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.inverse, self.subm)
        else:
            if self.subm:
                out_features = Fsp.indice_subm_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            elif self.inverse:
                out_features = Fsp.indice_inverse_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            else:
                out_features = Fsp.indice_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            if (self.bias is not None):
                out_features += self.bias
        out_tensor = SparseConvTensor(out_features, outids, out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
|
@CONV_LAYERS.register_module()
class SparseConv2d(SparseConvolution):
    """2D sparse convolution (thin wrapper fixing ``ndim=2``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            2,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConv3d(SparseConvolution):
    """3D sparse convolution (thin wrapper fixing ``ndim=3``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            3,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConv4d(SparseConvolution):
    """4D sparse convolution (thin wrapper fixing ``ndim=4``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            4,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConvTranspose2d(SparseConvolution):
    """2D transposed sparse convolution (``ndim=2``, ``transposed=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            2,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            transposed=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseConvTranspose3d(SparseConvolution):
    """3D transposed sparse convolution (``ndim=3``, ``transposed=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            3,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            transposed=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv2d(SparseConvolution):
    """2D inverse sparse convolution; reuses the indice pairs cached under
    ``indice_key`` by its couple conv."""

    def __init__(self, in_channels, out_channels, kernel_size, indice_key=None, bias=True):
        super().__init__(
            2,
            in_channels,
            out_channels,
            kernel_size,
            bias=bias,
            inverse=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv3d(SparseConvolution):
    """3D inverse sparse convolution; reuses the indice pairs cached under
    ``indice_key`` by its couple conv."""

    def __init__(self, in_channels, out_channels, kernel_size, indice_key=None, bias=True):
        super().__init__(
            3,
            in_channels,
            out_channels,
            kernel_size,
            bias=bias,
            inverse=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv2d(SparseConvolution):
    """2D submanifold sparse convolution (``ndim=2``, ``subm=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            2,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            subm=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv3d(SparseConvolution):
    """3D submanifold sparse convolution (``ndim=3``, ``subm=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            3,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            subm=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv4d(SparseConvolution):
    """4D submanifold sparse convolution (``ndim=4``, ``subm=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(
            4,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            subm=True,
            indice_key=indice_key)
|
class SparseConvFunction(Function):
    """Sparse Convolution.

    Please refer to `SECOND <https://www.mdpi.com/1424-8220/18/10/3337>`_ for
    more details.
    """

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out):
        """
        Args:
            features (torch.Tensor): Features that needs to convolute.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between inputs locations
                and outputs locations.
            indice_pair_num (torch.Tensor): Indice pairs num.
            num_activate_out (torch.Tensor): Output channels num.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out, False)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        input_bp, filters_bp = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            False)
        # Only features and filters receive gradients.
        return input_bp, filters_bp, None, None, None
|
class SparseInverseConvFunction(Function):
    """Autograd wrapper for the inverse sparse convolution."""

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run an inverse sparse convolution.

        Args:
            features (torch.Tensor): Features to convolve.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between input locations
                and output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Number of active output sites.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out, True, False)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        grad_features, grad_filters = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            True, False)
        # One slot per forward argument; the index/count inputs get no grad.
        return grad_features, grad_filters, None, None, None
|
class SubMConvFunction(Function):
    """Autograd wrapper for the submanifold sparse convolution."""

    @staticmethod
    def forward(ctx, features, filters, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run a submanifold sparse convolution.

        Args:
            features (torch.Tensor): Features to convolve.
            filters (torch.nn.parameter.Parameter): Convolution filters.
            indice_pairs (torch.Tensor): Indice pairs between input locations
                and output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Number of active output sites.

        Returns:
            torch.Tensor: Output features from gather-gemm-scatter.
        """
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs,
                               indice_pair_num, num_activate_out, False, True)

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        grad_features, grad_filters = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num,
            False, True)
        # One slot per forward argument; the index/count inputs get no grad.
        return grad_features, grad_filters, None, None, None
|
class SparseMaxPoolFunction(Function):
    """Autograd wrapper for sparse max pooling."""

    @staticmethod
    def forward(ctx, features, indice_pairs, indice_pair_num,
                num_activate_out):
        """Run sparse max pooling.

        Args:
            features (torch.Tensor): Features to pool.
            indice_pairs (torch.Tensor): Indice pairs between input locations
                and output locations.
            indice_pair_num (torch.Tensor): Number of indice pairs.
            num_activate_out (torch.Tensor): Number of active output sites.

        Returns:
            torch.Tensor: Output features from sparse maxpooling.
        """
        pooled = ops.indice_maxpool(features, indice_pairs, indice_pair_num,
                                    num_activate_out)
        # The pooled output is saved too: the backward pass needs it to
        # identify which input contributed each max.
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, pooled)
        return pooled

    @staticmethod
    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, pooled = ctx.saved_tensors
        grad_features = ops.indice_maxpool_backward(
            features, pooled, grad_output, indice_pairs, indice_pair_num)
        # One slot per forward argument; the index/count inputs get no grad.
        return grad_features, None, None, None
|
def is_spconv_module(module):
    """Return True if ``module`` is a sparse-conv module.

    Such modules (subclasses of :class:`SparseModule`) receive the whole
    ``SparseConvTensor`` in :meth:`SparseSequential.forward`, while ordinary
    ``nn.Module`` layers only see its ``features``.
    """
    return isinstance(module, SparseModule)
|
def is_sparse_conv(module):
    """Return True if ``module`` is a :class:`SparseConvolution` layer."""
    # Imported lazily here (as in ``SparseSequential.fused``), presumably to
    # avoid a circular import with .sparse_conv — confirm before hoisting.
    from .sparse_conv import SparseConvolution
    return isinstance(module, SparseConvolution)
|
def _mean_update(vals, m_vals, t):
outputs = []
if (not isinstance(vals, list)):
vals = [vals]
if (not isinstance(m_vals, list)):
m_vals = [m_vals]
for (val, m_val) in zip(vals, m_vals):
output = (((t / float((t + 1))) * m_val) + ((1 / float((t + 1))) * val))
outputs.append(output)
if (len(outputs) == 1):
outputs = outputs[0]
return outputs
|
class SparseModule(nn.Module):
    """Placeholder base class for sparse modules.

    Every module subclassing this is fed the full ``SparseConvTensor``
    (rather than just its ``features``) when run inside a
    ``SparseSequential``.
    """
    pass
|
class SparseSequential(SparseModule):
    """A sequential container for sparse and dense modules.

    Modules will be added to it in the order they are passed in the
    constructor. Alternatively, an ordered dict of modules can also be
    passed in.

    To make it easier to understand, given is a small example::

        Example:
        >>> # using Sequential:
        >>> from mmcv.ops import SparseSequential
        >>> model = SparseSequential(
                SparseConv2d(1,20,5),
                nn.ReLU(),
                SparseConv2d(20,64,5),
                nn.ReLU()
                )

        >>> # using Sequential with OrderedDict
        >>> model = SparseSequential(OrderedDict([
                ('conv1', SparseConv2d(1,20,5)),
                ('relu1', nn.ReLU()),
                ('conv2', SparseConv2d(20,64,5)),
                ('relu2', nn.ReLU())
                ]))

        >>> # using Sequential with kwargs(python 3.6+)
        >>> model = SparseSequential(
                conv1=SparseConv2d(1,20,5),
                relu1=nn.ReLU(),
                conv2=SparseConv2d(20,64,5),
                relu2=nn.ReLU()
                )
    """

    def __init__(self, *args, **kwargs):
        super(SparseSequential, self).__init__()
        if (len(args) == 1) and isinstance(args[0], OrderedDict):
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)
        for name, module in kwargs.items():
            # Keyword submodules rely on ordered **kwargs (PEP 468, py3.6+).
            if sys.version_info < (3, 6):
                raise ValueError('kwargs only supported in py36+')
            if name in self._modules:
                raise ValueError('name exists.')
            self.add_module(name, module)
        # Per-layer sparsity statistics recorded during forward().
        self._sparity_dict = {}

    def __getitem__(self, idx):
        """Return the idx-th submodule (supports negative indices)."""
        if not (-len(self) <= idx < len(self)):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        it = iter(self._modules.values())
        for i in range(idx):
            next(it)
        return next(it)

    def __len__(self):
        return len(self._modules)

    @property
    def sparity_dict(self):
        # NOTE: 'sparity' (sic) is the established public name; kept for
        # backward compatibility.
        return self._sparity_dict

    def add(self, module, name=None):
        """Append ``module``, auto-naming it by position when unnamed."""
        if name is None:
            name = str(len(self._modules))
        if name in self._modules:
            raise KeyError('name exists')
        self.add_module(name, module)

    def forward(self, input):
        """Run the chain, dispatching on module/input kind.

        Sparse modules receive the whole ``SparseConvTensor``; dense modules
        are applied to its ``.features`` (skipped when there are no active
        indices); anything else is called directly.
        """
        for k, module in self._modules.items():
            if is_spconv_module(module):
                assert isinstance(input, SparseConvTensor)
                self._sparity_dict[k] = input.sparity
                input = module(input)
            elif isinstance(input, SparseConvTensor):
                if input.indices.shape[0] != 0:
                    input.features = module(input.features)
            else:
                input = module(input)
        return input

    def fused(self):
        """Return a new ``SparseSequential`` with every
        SparseConvolution + BatchNorm1d pair folded into one conv.

        Returns:
            SparseSequential: Container with BN layers absorbed into the
            preceding convolutions.
        """
        from .sparse_conv import SparseConvolution
        mods = [v for k, v in self._modules.items()]
        fused_mods = []
        idx = 0
        while idx < len(mods):
            if is_sparse_conv(mods[idx]):
                if (idx < (len(mods) - 1)) and isinstance(
                        mods[idx + 1], nn.BatchNorm1d):
                    # Rebuild the conv with bias=True and fused_bn=True so it
                    # can absorb the BN affine transform.
                    new_module = SparseConvolution(
                        ndim=mods[idx].ndim,
                        in_channels=mods[idx].in_channels,
                        out_channels=mods[idx].out_channels,
                        kernel_size=mods[idx].kernel_size,
                        stride=mods[idx].stride,
                        padding=mods[idx].padding,
                        dilation=mods[idx].dilation,
                        groups=mods[idx].groups,
                        bias=True,
                        subm=mods[idx].subm,
                        output_padding=mods[idx].output_padding,
                        transposed=mods[idx].transposed,
                        inverse=mods[idx].inverse,
                        indice_key=mods[idx].indice_key,
                        fused_bn=True)
                    new_module.load_state_dict(mods[idx].state_dict(), False)
                    new_module.to(mods[idx].weight.device)
                    conv = new_module
                    bn = mods[idx + 1]
                    conv.bias.data.zero_()
                    # BN computes (x - mean) / sqrt(var + eps) * gamma + beta,
                    # so the fold scale is gamma / sqrt(var + eps).
                    # Bug fix: this previously used sqrt(var) + eps, which
                    # misplaces eps and skews the fused weights/bias.
                    denom = torch.sqrt(bn.running_var + bn.eps)
                    conv.weight.data[:] = (
                        conv.weight.data * bn.weight.data / denom)
                    conv.bias.data[:] = (
                        (conv.bias.data - bn.running_mean) * bn.weight.data /
                        denom + bn.bias.data)
                    fused_mods.append(conv)
                    idx += 2
                else:
                    fused_mods.append(mods[idx])
                    idx += 1
            else:
                fused_mods.append(mods[idx])
                idx += 1
        return SparseSequential(*fused_mods)
|
class ToDense(SparseModule):
    """Convert a SparseConvTensor to an NCHW dense tensor."""

    def forward(self, x: SparseConvTensor):
        # Materialize via the tensor's own dense() method.
        return x.dense()
|
class RemoveGrid(SparseModule):
    """Remove the pre-allocated grid buffer from a SparseConvTensor."""

    def forward(self, x: SparseConvTensor):
        # Drop the cached grid reference; the tensor is otherwise unchanged
        # and returned as-is.
        x.grid = None
        return x
|
def get_conv_output_size(input_size, kernel_size, stride, padding, dilation):
    """Compute the output spatial size of a (sparse) convolution.

    Args:
        input_size: Spatial size per dimension.
        kernel_size: Kernel size per dimension; ``-1`` maps that dimension
            to output size 1.
        stride: Stride per dimension.
        padding: Padding per dimension.
        dilation: Dilation per dimension.

    Returns:
        list: Output size per dimension.
    """
    output_size = []
    for size, k, s, p, d in zip(input_size, kernel_size, stride, padding,
                                dilation):
        if k == -1:
            output_size.append(1)
        else:
            # Standard conv output-size formula.
            output_size.append((size + 2 * p - d * (k - 1) - 1) // s + 1)
    return output_size
|
def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation,
                           output_padding):
    """Compute the output spatial size of a (sparse) deconvolution.

    Args:
        input_size: Spatial size per dimension.
        kernel_size: Kernel size per dimension; ``-1`` is rejected.
        stride: Stride per dimension.
        padding: Padding per dimension.
        dilation: Accepted for signature symmetry with
            :func:`get_conv_output_size`; not used in the formula.
        output_padding: Output padding per dimension.

    Returns:
        list: Output size per dimension.

    Raises:
        ValueError: If any kernel size is ``-1``.
    """
    output_size = []
    for size, k, s, p, op in zip(input_size, kernel_size, stride, padding,
                                 output_padding):
        if k == -1:
            raise ValueError("deconv don't support kernel_size < 0")
        # Standard transposed-conv output-size formula (without dilation).
        output_size.append((size - 1) * s - 2 * p + k + op)
    return output_size
|
def get_indice_pairs(indices, batch_size, spatial_shape, ksize=3, stride=1,
                     padding=0, dilation=1, out_padding=0, subm=False,
                     transpose=False, grid=None):
    """Build the input/output indice pairs for a sparse conv-like op.

    Scalar hyper-parameters are broadcast to one value per spatial
    dimension; the dimensionality is inferred from ``indices`` (one batch
    column plus one column per spatial dim).

    Args:
        indices (torch.Tensor): Active input indices, shape (N, ndim + 1).
        batch_size (int): Batch size.
        spatial_shape (list): Input spatial shape.
        ksize, stride, padding, dilation, out_padding: Scalars or per-dim
            sequences.
        subm (bool): Submanifold mode (output locations equal input ones).
        transpose (bool): Deconvolution mode.
        grid: Optional pre-allocated grid buffer; selects the *_backward
            extension kernels.

    Returns:
        The result of the matching extension kernel.
    """
    ndim = indices.shape[1] - 1

    def _per_dim(value):
        # Broadcast a scalar to one entry per spatial dimension.
        return value if isinstance(value, (list, tuple)) else [value] * ndim

    ksize = _per_dim(ksize)
    stride = _per_dim(stride)
    padding = _per_dim(padding)
    dilation = _per_dim(dilation)
    out_padding = _per_dim(out_padding)
    for d, s in zip(dilation, stride):
        # Simultaneous stride > 1 and dilation > 1 is unsupported.
        assert s == 1 or d == 1, "don't support this."
    if subm:
        out_shape = spatial_shape
    elif transpose:
        out_shape = get_deconv_output_size(spatial_shape, ksize, stride,
                                           padding, dilation, out_padding)
    else:
        out_shape = get_conv_output_size(spatial_shape, ksize, stride,
                                         padding, dilation)
    if grid is None:
        if ndim not in (2, 3, 4):
            raise NotImplementedError
        pair_func = getattr(ext_module,
                            'get_indice_pairs_{}d_forward'.format(ndim))
        return pair_func(indices, batch_size, out_shape, spatial_shape,
                         ksize, stride, padding, dilation, out_padding,
                         int(subm), int(transpose))
    else:
        if ndim not in (2, 3):
            raise NotImplementedError
        pair_func = getattr(ext_module,
                            'get_indice_pairs_{}d_backward'.format(ndim))
        return pair_func(indices, grid, batch_size, out_shape, spatial_shape,
                         ksize, stride, padding, dilation, out_padding,
                         int(subm), int(transpose))
|
def indice_conv(features, filters, indice_pairs, indice_pair_num,
                num_activate_out, inverse=False, subm=False):
    """Dispatch a sparse convolution to the extension kernel.

    Only float32 and float16 filters are supported.

    Raises:
        NotImplementedError: For any other filter dtype.
    """
    if filters.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_conv_forward(features, filters, indice_pairs,
                                          indice_pair_num, num_activate_out,
                                          int(inverse), int(subm))
|
def fused_indice_conv(features, filters, bias, indice_pairs, indice_pair_num,
                      num_activate_out, inverse, subm):
    """Dispatch a fused (conv + folded BN bias) sparse convolution.

    Supported only for half-precision features or float32 filters; any
    other combination raises.

    Raises:
        NotImplementedError: When neither dtype condition holds.
    """
    # Bug fix: this previously read ``filters.dtypes`` — torch.Tensor has no
    # such attribute, so any non-half features raised AttributeError instead
    # of reaching the NotImplementedError branch.
    if features.dtype == torch.half or filters.dtype == torch.float32:
        func = ext_module.fused_indice_conv_forward
    else:
        raise NotImplementedError
    return func(features, filters, bias, indice_pairs, indice_pair_num,
                num_activate_out, int(inverse), int(subm))
|
def indice_conv_backward(features, filters, out_bp, indice_pairs,
                         indice_pair_num, inverse=False, subm=False):
    """Dispatch the sparse convolution backward pass to the extension.

    Only float32 and float16 filters are supported.

    Raises:
        NotImplementedError: For any other filter dtype.
    """
    if filters.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_conv_backward(features, filters, out_bp,
                                           indice_pairs, indice_pair_num,
                                           int(inverse), int(subm))
|
def indice_maxpool(features, indice_pairs, indice_pair_num,
                   num_activate_out):
    """Dispatch sparse max pooling to the extension kernel.

    Only float32 and float16 features are supported.

    Raises:
        NotImplementedError: For any other feature dtype.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_forward(features, indice_pairs,
                                             indice_pair_num,
                                             num_activate_out)
|
def indice_maxpool_backward(features, out_features, out_bp, indice_pairs,
                            indice_pair_num):
    """Dispatch the sparse max pooling backward pass to the extension.

    Only float32 and float16 features are supported.

    Raises:
        NotImplementedError: For any other feature dtype.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_backward(features, out_features, out_bp,
                                              indice_pairs, indice_pair_num)
|
class SparseMaxPool(SparseModule):
    """Max pooling over the active sites of a SparseConvTensor.

    Args:
        ndim (int): Number of spatial dimensions.
        kernel_size, stride, padding, dilation: Scalars or per-dim
            sequences; scalars are broadcast to every dimension.
        subm (bool): Submanifold mode — output locations equal input ones.
    """

    def __init__(self, ndim, kernel_size, stride=1, padding=0, dilation=1,
                 subm=False):
        super(SparseMaxPool, self).__init__()

        def _per_dim(value):
            # Broadcast a scalar to one entry per spatial dimension.
            return value if isinstance(value, (list, tuple)) else \
                [value] * ndim

        self.ndim = ndim
        self.kernel_size = _per_dim(kernel_size)
        self.stride = _per_dim(stride)
        self.padding = _per_dim(padding)
        self.dilation = _per_dim(dilation)
        self.subm = subm

    def forward(self, input):
        assert isinstance(input, SparseConvTensor)
        features = input.features
        indices = input.indices
        batch_size = input.batch_size
        in_shape = input.spatial_shape
        # Submanifold pooling keeps the spatial shape unchanged.
        if self.subm:
            out_shape = in_shape
        else:
            out_shape = get_conv_output_size(in_shape, self.kernel_size,
                                             self.stride, self.padding,
                                             self.dilation)
        outids, pairs, pairs_num = get_indice_pairs(
            indices, batch_size, in_shape, self.kernel_size, self.stride,
            self.padding, self.dilation, 0, self.subm)
        device = features.device
        pooled = indice_maxpool(features, pairs.to(device),
                                pairs_num.to(device), outids.shape[0])
        out = SparseConvTensor(pooled, outids, out_shape, batch_size)
        # Propagate cached indice bookkeeping so downstream layers can
        # reuse it.
        out.indice_dict = input.indice_dict
        out.grid = input.grid
        return out
|
# NOTE: removed trailing non-code text ("Subsets and Splits ...") — web-page
# residue from data extraction, not part of this module.