code
stringlengths
17
6.64M
def _interpolate_size_to_scales(g, input, output_size, dim):
    """Convert an interpolate ``output_size`` into a per-dim scales tensor.

    When ``output_size`` is still a graph value (dynamic), the scales are
    computed in-graph as output_size / input_spatial_size; otherwise they are
    folded into a constant. The first two dims (batch, channel) always get
    scale 1.
    """
    output_size = _maybe_get_const(output_size, 'is')
    if _is_value(output_size):
        offset = 2
        # Leading ones for the batch/channel dims.
        offsets = g.op('Constant', value_t=torch.ones(offset, dtype=torch.float32))
        dividend = g.op('Cast', output_size, to_i=cast_pytorch_to_onnx['Float'])
        # Spatial part of the input shape (dims 2..end).
        divisor = _slice_helper(g, g.op('Shape', input), axes=[0], ends=[maxsize], starts=[offset])
        divisor = g.op('Cast', divisor, to_i=cast_pytorch_to_onnx['Float'])
        scale_dims = g.op('Div', dividend, divisor)
        scales = g.op('Concat', offsets, scale_dims, axis_i=0)
    else:
        # Static case: fold the ratios into a constant (1.0 for dims 0 and 1).
        scales_constant = [(1.0 if (i < 2) else (float(output_size[(- (dim - i))]) / float(input.type().sizes()[(- (dim - i))]))) for i in range(0, dim)]
        scales = g.op('Constant', value_t=torch.tensor(scales_constant, dtype=torch.float32))
    return scales
def _interpolate_get_scales_if_available(g, scales):
    """Turn interpolate ``scales`` args into one graph scales tensor.

    Returns ``None`` when no usable scales were supplied (empty arg list,
    None value, or the -1 sentinel).
    """
    if (len(scales) == 0):
        return None
    # 'fs' = a list/tensor of floats in one arg; 'f' = one float per arg.
    scale_desc = ('fs' if ((scales[0].type().kind() == 'ListType') or ((scales[0].type().kind() == 'TensorType') and (sum(scales[0].type().sizes()) > 1))) else 'f')
    available_scales = ((_maybe_get_const(scales[0], scale_desc) != (- 1)) and (not _is_none(scales[0])))
    if (not available_scales):
        return None
    # Leading ones for the batch/channel dims.
    offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if (scale_desc == 'fs'):
        scales_list = g.op('Constant', value_t=torch.tensor(_maybe_get_const(scales[0], scale_desc)))
        scales = g.op('Concat', offsets, scales_list, axis_i=0)
    else:
        # One scalar scale per spatial dim: unsqueeze, cast, then concat.
        scales_list = []
        for scale in scales:
            unsqueezed_scale = _unsqueeze_helper(g, scale, 0)
            unsqueezed_scale = g.op('Cast', unsqueezed_scale, to_i=cast_pytorch_to_onnx['Float'])
            scales_list.append(unsqueezed_scale)
        scales = g.op('Concat', offsets, *scales_list, axis_i=0)
    return scales
def _get_interpolate_attributes(g, mode, args):
    """Split the trailing symbolic args into (scales, align_corners).

    'nearest' mode has no align_corners argument; every other mode passes it
    first, followed by the scale args.
    """
    if mode == 'nearest':
        align_corners, scale_args = None, args[0:]
    else:
        align_corners, scale_args = args[0], args[1:]
    return _interpolate_get_scales_if_available(g, scale_args), align_corners
def _interpolate_get_scales(g, scale_factor, dim):
    """Build a Resize scales tensor ``[1, 1, sf, ...]`` from ``scale_factor``."""
    # Leading ones for the batch/channel dims.
    offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if isinstance(scale_factor.type(), torch._C.ListType):
        # Already a per-dim list: just prepend the two ones.
        return g.op('Concat', offsets, scale_factor, axis_i=0)
    else:
        scale_factor = _unsqueeze_helper(g, scale_factor, 0)
        scale_factor = g.op('Cast', scale_factor, to_i=cast_pytorch_to_onnx['Float'])
        # Repeat the scalar scale once per spatial dim.
        scales = [scale_factor for i in range((dim - 2))]
        scale_factor = g.op('Concat', offsets, *scales, axis_i=0)
        return scale_factor
def _size_helper(g, self, dim):
    """Return the size of ``self`` along graph value ``dim``."""
    from torch.onnx.symbolic_opset9 import select
    shape = g.op('Shape', self)
    axis_zero = g.op('Constant', value_t=torch.tensor([0]))
    return select(g, shape, axis_zero, dim)
def _avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name): if (divisor_override and (divisor_override.node().kind() != 'prim::Constant')): return _unimplemented(name, 'divisor_override') if (not stride): stride = kernel_size padding = tuple(tuple_fn(padding)) return padding
def _interpolate(name, dim, interpolate_mode):
    # Factory producing an opset-11 Resize-based symbolic for one
    # interpolation mode / dimensionality.
    def symbolic_fn(g, input, output_size, *args):
        (scales, align_corners) = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        transformation_mode = ('asymmetric' if (interpolate_mode == 'nearest') else ('align_corners' if align_corners else 'pytorch_half_pixel'))
        empty_tensor = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
        if (scales is None):
            # Size-driven resize: build the full target shape (N, C, *out).
            if (('ONNX_BACKEND' in os.environ) and (os.environ['ONNX_BACKEND'] == 'TensorRT')):
                # TensorRT path needs a fully-constant size tensor, so the
                # input's static sizes must be available.
                input_size = input.type().sizes()
                input_size = input_size[:2]
                output_size = sym_help._maybe_get_const(output_size, 'is')
                input_size.extend(output_size)
                output_size = g.op('Constant', value_t=torch.tensor(input_size, dtype=torch.int64))
            else:
                # Dynamic path: concat Shape(input)[:2] with the requested size.
                input_size = g.op('Shape', input)
                input_size_beg = sym_help._slice_helper(g, input_size, axes=[0], ends=[2], starts=[0])
                output_size = g.op('Cast', output_size, to_i=sym_help.cast_pytorch_to_onnx['Long'])
                output_size = g.op('Concat', input_size_beg, output_size, axis_i=0)
            # Resize-11 still requires a (possibly empty) scales input.
            scales = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
            return g.op('Resize', input, empty_tensor, scales, output_size, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
        else:
            # Scale-driven resize.
            return g.op('Resize', input, empty_tensor, scales, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
    return symbolic_fn
@parse_args('v', 'v', 'i', 'i', 'i', 'none')
def topk(g, self, k, dim, largest, sorted, out=None):
    """Symbolic for aten::topk; delegates to the shared ONNX TopK helper."""
    return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)
def masked_select(g, self, mask):
    """Symbolic for aten::masked_select via NonZero + GatherND."""
    from torch.onnx.symbolic_opset9 import expand_as, nonzero
    broadcast_mask = expand_as(g, mask, self)
    gather_index = nonzero(g, broadcast_mask)
    return g.op('GatherND', self, gather_index)
def _prepare_onnx_paddings(g, dim, pad):
    """Convert torch-style pads (last-dim-first begin/end pairs) to ONNX order.

    ONNX Pad expects ``[x1_begin, x2_begin, ..., x1_end, x2_end, ...]`` over
    all ``dim`` dims; torch supplies begin/end pairs starting from the last
    dim and may omit leading dims.
    """
    pad_len = torch.onnx.symbolic_opset9.size(g, pad, g.op('Constant', value_t=torch.tensor([0])))
    # Zero-fill the list out to 2 * dim entries.
    extension = g.op('Sub', g.op('Mul', g.op('Constant', value_t=torch.tensor(dim, dtype=torch.int64)), g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))), pad_len)
    pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    paddings = g.op('Concat', pad, g.op('ConstantOfShape', extension, value_t=torch.tensor([0], dtype=torch.int64)), axis_i=0)
    # Group into (begin, end) pairs, reverse dim order, then regroup so all
    # begins precede all ends.
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1), 2])))
    paddings = g.op('Transpose', torch.onnx.symbolic_opset10.flip(g, paddings, [0]), perm_i=[1, 0])
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1)])))
    padding_c = g.op('Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    return padding_c
def constant_pad_nd(g, input, padding, value=None):
    """Symbolic for aten::constant_pad_nd using the opset-11 Pad op."""
    fill = sym_help._maybe_get_scalar(value)
    fill = sym_help._if_scalar_type_as(g, fill, input)
    onnx_pad = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, onnx_pad, fill, mode_s='constant')
def reflection_pad(g, input, padding):
    """Symbolic for reflection padding using the opset-11 Pad op."""
    onnx_pad = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, onnx_pad, mode_s='reflect')
def _avg_pool(name, tuple_fn):
    # Factory for avg_pool{1,2,3}d symbolics; tuple_fn expands scalars to the
    # op's n-tuple form.
    @parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none')
    def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):
        padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)
        if (not stride):
            stride = kernel_size
        if count_include_pad:
            # Pre-pad explicitly so the padded cells count in the average,
            # then run AveragePool with zero padding.
            input = g.op('Pad', input, g.op('Constant', value_t=torch.tensor(((((0,) * 2) + padding) * 2))), mode_s='constant')
            padding = ((0,) * len(padding))
        output = g.op('AveragePool', input, kernel_shape_i=tuple_fn(kernel_size), strides_i=tuple_fn(stride), pads_i=(padding * 2), ceil_mode_i=ceil_mode)
        return output
    return symbolic_fn
def _get_im2col_indices_along_dim(g, input_d, kernel_size_d, dilation_d, padding_d, stride_d):
    """Compute sliding-window gather indices along one spatial dim."""
    # Number of valid window start positions: padded length minus the
    # dilated kernel span.
    blocks_d = g.op('Add', input_d, g.op('Constant', value_t=torch.tensor((padding_d * 2))))
    blocks_d = g.op('Sub', blocks_d, g.op('Constant', value_t=torch.tensor((dilation_d * (kernel_size_d - 1))))) 
    # Window start offsets, spaced by stride.
    blocks_d_indices = g.op('Range', g.op('Constant', value_t=torch.tensor(0)), blocks_d, g.op('Constant', value_t=torch.tensor(stride_d)))
    # Kernel tap offsets, spaced by dilation.
    kernel_grid = np.arange(0, (kernel_size_d * dilation_d), dilation_d)
    kernel_grid = g.op('Constant', value_t=torch.tensor([kernel_grid]))
    blocks_d_indices = g.op('Unsqueeze', blocks_d_indices, axes_i=[0])
    kernel_mask = g.op('Reshape', kernel_grid, g.op('Constant', value_t=torch.tensor([(- 1), 1])))
    # Broadcast add yields every (tap, window-start) index combination.
    block_mask = g.op('Add', blocks_d_indices, kernel_mask)
    return block_mask
def _get_im2col_padded_input(g, input, padding_h, padding_w):
    """Zero-pad the input along H and W (both sides) for im2col."""
    pad_values = [0, 0, padding_h, padding_w] * 2
    pad = g.op('Constant', value_t=torch.LongTensor(pad_values))
    return g.op('Pad', input, pad)
def _get_im2col_output_shape(g, input, kernel_h, kernel_w):
    """Build the target shape (N, C * kh * kw, -1) for the im2col reshape."""
    batch_dim = size(g, input, g.op('Constant', value_t=torch.tensor(0)))
    channel_dim = size(g, input, g.op('Constant', value_t=torch.tensor(1)))
    kernel_area = g.op('Constant', value_t=torch.tensor(kernel_h * kernel_w))
    channel_unfolded = g.op('Mul', channel_dim, kernel_area)
    return g.op(
        'Concat',
        g.op('Unsqueeze', batch_dim, axes_i=[0]),
        g.op('Unsqueeze', channel_unfolded, axes_i=[0]),
        g.op('Constant', value_t=torch.tensor([-1])),
        axis_i=0)
def size(g, self, dim=None):
    """Symbolic for aten::size: the full shape when ``dim`` is None,
    otherwise the size of that single dim."""
    if dim is not None:
        return sym_help._size_helper(g, self, dim)
    return g.op('Shape', self)
@parse_args('v', 'is', 'is', 'is', 'is')
def im2col(g, input, kernel_size, dilation, padding, stride):
    """Symbolic for im2col (unfold) on NCHW input via Gather + Transpose."""
    input_h = size(g, input, g.op('Constant', value_t=torch.tensor(2)))
    input_w = size(g, input, g.op('Constant', value_t=torch.tensor(3)))
    (stride_h, stride_w) = (stride[0], stride[1])
    (padding_h, padding_w) = (padding[0], padding[1])
    (dilation_h, dilation_w) = (dilation[0], dilation[1])
    (kernel_h, kernel_w) = (kernel_size[0], kernel_size[1])
    # Gather indices for every (kernel tap, window start) pair per axis.
    blocks_row_indices = _get_im2col_indices_along_dim(g, input_h, kernel_h, dilation_h, padding_h, stride_h)
    blocks_col_indices = _get_im2col_indices_along_dim(g, input_w, kernel_w, dilation_w, padding_w, stride_w)
    output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
    padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)
    output = g.op('Gather', padded_input, blocks_row_indices, axis_i=2)
    output = g.op('Gather', output, blocks_col_indices, axis_i=4)
    # Reorder so kernel taps are adjacent before flattening to (N, C*kh*kw, L).
    output = g.op('Transpose', output, perm_i=[0, 1, 2, 4, 3, 5])
    return g.op('Reshape', output, output_shape)
@parse_args('v', 'i')
def one_hot(g, self, num_classes):
    """Symbolic for aten::one_hot along the last axis."""
    off_on_values = g.op('Constant', value_t=torch.LongTensor([0, 1]))
    depth = g.op('Constant', value_t=torch.LongTensor([num_classes]))
    return g.op('OneHot', self, depth, off_on_values, axis_i=-1)
@parse_args('v', 'i', 'none')
def softmax(g, input, dim, dtype=None):
    """Symbolic for aten::softmax.

    Uses ONNX Softmax directly when ``dim`` is the last axis (the only case
    the ONNX op matches); otherwise emits a numerically-stable manual
    exp / sum decomposition along ``dim``.
    """
    input_dim = input.type().dim()
    if input_dim:
        if (dim < 0):
            dim = (input_dim + dim)
        if (input_dim == (dim + 1)):
            # Last-axis case maps 1:1 onto ONNX Softmax.
            softmax = g.op('Softmax', input, axis_i=dim)
            if (dtype and (dtype.node().kind() != 'prim::Constant')):
                parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
                softmax = g.op('Cast', softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
            return softmax
    # Subtract the per-dim max for numerical stability before exponentiating.
    max_value = g.op('ReduceMax', input, axes_i=[dim], keepdims_i=1)
    input = g.op('Sub', input, max_value)
    exp = g.op('Exp', input)
    sum = g.op('ReduceSum', exp, axes_i=[dim])
    softmax = g.op('Div', exp, sum)
    if (dtype and (dtype.node().kind() != 'prim::Constant')):
        parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
        softmax = g.op('Cast', softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    return softmax
def _adaptive_pool(name, type, tuple_fn, fn=None):
    # Factory for adaptive_{avg,max}_pool symbolics. Falls back to Global
    # pooling when output_size is all ones; otherwise requires static input
    # sizes to derive an equivalent kernel/stride.
    @parse_args('v', 'is')
    def symbolic_fn(g, input, output_size):
        if ((output_size == ([1] * len(output_size))) and (type == 'AveragePool')):
            return g.op('GlobalAveragePool', input)
        if (not input.isCompleteTensor()):
            if (output_size == ([1] * len(output_size))):
                # MaxPool symbolics return (output, indices); indices are
                # not representable here.
                return (g.op('GlobalMaxPool', input), None)
            raise NotImplementedError('[Adaptive pool]:input size not accessible')
        dim = input.type().sizes()[2:]
        if ((output_size == ([1] * len(output_size))) and (type == 'MaxPool')):
            return (g.op('GlobalMaxPool', input), None)
        # Derive stride/kernel that reproduce adaptive pooling; exact only
        # when the input sizes divide evenly by output_size.
        s = [int((dim[i] / output_size[i])) for i in range(0, len(dim))]
        k = [(dim[i] - ((output_size[i] - 1) * s[i])) for i in range(0, len(dim))]
        if (type == 'MaxPool'):
            # Delegate to the regular max_pool symbolic (zero pad, dilation 1).
            return fn(g, input, k, k, ((0,) * len(dim)), ((1,) * len(dim)), False)
        output = g.op(type, input, kernel_shape_i=tuple_fn(k), strides_i=tuple_fn(s), ceil_mode_i=False)
        return output
    return symbolic_fn
def new_full(g, self, size, fill_value, dtype, layout, device, pin_memory=False):
    """Symbolic for aten::new_full; inherits dtype from ``self`` when absent."""
    from torch.onnx.symbolic_opset9 import full
    if dtype is None and self.isCompleteTensor():
        scalar_name = self.type().scalarType()
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[scalar_name])
    return full(g, size, fill_value, dtype, layout, device, pin_memory)
@parse_args('v', 'v', 'i', 'i', 'i')
def grid_sampler(g, input, grid, interpolation_mode, padding_mode, align_corners=False):
    """Symbolic mapping aten::grid_sampler onto the mmcv custom op."""
    return g.op(
        'mmcv::grid_sampler',
        input,
        grid,
        interpolation_mode_i=interpolation_mode,
        padding_mode_i=padding_mode,
        align_corners_i=align_corners)
@parse_args('v', 'i')
def cummax(g, input, dim):
    """Symbolic mapping aten::cummax onto the mmcv custom op (values, indices)."""
    return g.op('mmcv::cummax', input, dim_i=dim, outputs=2)
@parse_args('v', 'i')
def cummin(g, input, dim):
    """Symbolic mapping aten::cummin onto the mmcv custom op (values, indices)."""
    return g.op('mmcv::cummin', input, dim_i=dim, outputs=2)
@parse_args('v', 'v', 'is')
def roll(g, input, shifts, dims):
    """Symbolic for aten::roll via slice-and-concat per rolled dim."""
    from packaging import version
    from torch.onnx.symbolic_opset9 import squeeze
    input_shape = g.op('Shape', input)
    # No dims given: torch rolls the flattened tensor.
    need_flatten = (len(dims) == 0)
    if need_flatten:
        resize_shape = input_shape
        input = g.op('Reshape', input, g.op('Constant', value_t=torch.LongTensor([1, (- 1)])))
        input_shape = g.op('Shape', input)
        dims = [1]
    for (index, dim) in enumerate(dims):
        end_size = sym_help._slice_helper(g, input_shape, axes=[0], ends=[(dim + 1)], starts=[dim])
        shift_size = sym_help._slice_helper(g, shifts, axes=[0], ends=[(index + 1)], starts=[index])
        slice_size = g.op('Sub', end_size, shift_size)
        # Normalize the split point into [0, dim_size):
        # slice_size = slice_size - dim_size * (slice_size // dim_size).
        div_size = g.op('Div', slice_size, end_size)
        slice_size = g.op('Sub', slice_size, g.op('Mul', end_size, div_size))
        if (version.parse(torch.__version__) >= version.parse('1.7.0')):
            # torch >= 1.7 requires an explicit squeeze dim.
            end_size = squeeze(g, end_size, 0)
            slice_size = squeeze(g, slice_size, 0)
        else:
            end_size = g.op('Squeeze', end_size)
            slice_size = g.op('Squeeze', slice_size)
        dim = torch.LongTensor([dim])
        # Swap the [0, split) and [split, end) pieces.
        input_slice0 = sym_help._slice_helper(g, input, axes=dim, starts=torch.LongTensor([0]), ends=slice_size, dynamic_slice=True)
        input_slice1 = sym_help._slice_helper(g, input, axes=dim, ends=end_size, starts=slice_size, dynamic_slice=True)
        input = g.op('Concat', input_slice1, input_slice0, axis_i=dim)
    if need_flatten:
        input = g.op('Reshape', input, resize_shape)
    return input
def register_extra_symbolics(opset=11):
    """Register mmcv's extra ONNX symbolic functions for ``opset``.

    Deprecated in favour of MMDeploy; a styled deprecation warning is emitted
    before registration.
    """
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    msg = (white_background + bright_style + red_text
           + 'DeprecationWarning: This function will be deprecated in future. '
           + blue_text + 'Welcome to use the unified model deployment toolbox '
           + 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           + reset_style)
    warnings.warn(msg)
    extra_ops = (
        ('one_hot', one_hot),
        ('im2col', im2col),
        ('topk', topk),
        ('softmax', softmax),
        ('constant_pad_nd', constant_pad_nd),
        ('reflection_pad1d', reflection_pad1d),
        ('reflection_pad2d', reflection_pad2d),
        ('reflection_pad3d', reflection_pad3d),
        ('avg_pool1d', avg_pool1d),
        ('avg_pool2d', avg_pool2d),
        ('avg_pool3d', avg_pool3d),
        ('adaptive_avg_pool1d', adaptive_avg_pool1d),
        ('adaptive_avg_pool2d', adaptive_avg_pool2d),
        ('adaptive_avg_pool3d', adaptive_avg_pool3d),
        ('masked_select', masked_select),
        ('upsample_nearest1d', upsample_nearest1d),
        ('upsample_nearest2d', upsample_nearest2d),
        ('upsample_nearest3d', upsample_nearest3d),
        ('upsample_linear1d', upsample_linear1d),
        ('upsample_bilinear2d', upsample_bilinear2d),
        ('upsample_trilinear3d', upsample_trilinear3d),
        ('upsample_bicubic2d', upsample_bicubic2d),
        ('new_full', new_full),
        ('grid_sampler', grid_sampler),
        ('cummax', cummax),
        ('cummin', cummin),
        ('roll', roll),
    )
    for op_name, op_fn in extra_ops:
        register_op(op_name, op_fn, '', opset)
class ActiveRotatedFilterFunction(Function):
    """Encoding the orientation information and generating orientation-
    sensitive features.

    The details are described in the paper `Align Deep Features for Oriented
    Object Detection <https://arxiv.org/abs/2008.09397>_`.
    """

    @staticmethod
    def forward(ctx, input, indices):
        """
        Args:
            input (torch.Tensor): Input features with shape
                [num_output_planes, num_input_planes, num_orientations, H, W].
            indices (torch.Tensor): Indices with shape
                [num_orientations, H, W, num_rotations].

        Returns:
            torch.Tensor: Refined features with shape [num_output_planes *
            num_rotations, num_input_planes * num_orientations, H, W].
        """
        ctx.save_for_backward(input, indices)
        (op, ip, o, h, w) = input.size()
        # NOTE: o, h, w are re-read from indices; r is the rotation count.
        (o, h, w, r) = indices.size()
        output = input.new_zeros(((op * r), (ip * o), h, w))
        ext_module.active_rotated_filter_forward(input, indices, output)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): The gradient of output features
                with shape [num_output_planes * num_rotations,
                num_input_planes * num_orientations, H, W].

        Returns:
            torch.Tensor: The gradient of input features with shape
            [num_output_planes, num_input_planes, num_orientations, H, W].
        """
        (input, indices) = ctx.saved_tensors
        grad_in = torch.zeros_like(input)
        ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)
        # indices is integral and receives no gradient.
        return (grad_in, None)
class AssignScoreWithK(Function):
    """Perform weighted sum to generate output features according to scores.
    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/paconv_lib/src/gpu>`_.

    This is a memory-efficient CUDA implementation of assign_scores operation,
    which first transform all point features with weight bank, then assemble
    neighbor features with ``knn_idx`` and perform weighted sum of ``scores``.

    See the `paper <https://arxiv.org/pdf/2103.14635.pdf>`_ appendix Sec. D
    for more detailed descriptions.

    Note:
        This implementation assumes using ``neighbor`` kernel input, which is
        (point_features - center_features, point_features).
        See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
        pointnet2/paconv.py#L128 for more details.
    """

    @staticmethod
    def forward(ctx, scores, point_features, center_features, knn_idx, aggregate='sum'):
        """
        Args:
            scores (torch.Tensor): (B, npoint, K, M), predicted scores to
                aggregate weight matrices in the weight bank.
                ``npoint`` is the number of sampled centers.
                ``K`` is the number of queried neighbors.
                ``M`` is the number of weight matrices in the weight bank.
            point_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed point features to be aggregated.
            center_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed center features to be aggregated.
            knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
                We assume the first idx in each row is the idx of the center.
            aggregate (str, optional): Aggregation method.
                Can be 'sum', 'avg' or 'max'. Defaults: 'sum'.

        Returns:
            torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
        """
        # Map the aggregation name to the kernel's integer flag.
        agg = {'sum': 0, 'avg': 1, 'max': 2}
        (B, N, M, out_dim) = point_features.size()
        (_, npoint, K, _) = scores.size()
        output = point_features.new_zeros((B, out_dim, npoint, K))
        ext_module.assign_score_withk_forward(point_features.contiguous(), center_features.contiguous(), scores.contiguous(), knn_idx.contiguous(), output, B=B, N0=N, N1=npoint, M=M, K=K, O=out_dim, aggregate=agg[aggregate])
        ctx.save_for_backward(output, point_features, center_features, scores, knn_idx)
        ctx.agg = agg[aggregate]
        return output

    @staticmethod
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): (B, out_dim, npoint, K)

        Returns:
            tuple[torch.Tensor]: A tuple contains five elements. The first one
            is the gradient of ``scores`` whose shape is (B, npoint, K, M).
            The second is the gradient of ``point_features`` whose shape is
            (B, N, M, out_dim). The third is the gradient of
            ``center_features`` with the shape of (B, N, M, out_dim). The
            last two are ``None``.
        """
        (_, point_features, center_features, scores, knn_idx) = ctx.saved_tensors
        agg = ctx.agg
        (B, N, M, out_dim) = point_features.size()
        (_, npoint, K, _) = scores.size()
        grad_point_features = point_features.new_zeros(point_features.shape)
        grad_center_features = center_features.new_zeros(center_features.shape)
        grad_scores = scores.new_zeros(scores.shape)
        ext_module.assign_score_withk_backward(grad_out.contiguous(), point_features.contiguous(), center_features.contiguous(), scores.contiguous(), knn_idx.contiguous(), grad_point_features, grad_center_features, grad_scores, B=B, N0=N, N1=npoint, M=M, K=K, O=out_dim, aggregate=agg)
        return (grad_scores, grad_point_features, grad_center_features, None, None)
class BallQuery(Function):
    """Find nearby points in spherical space."""

    @staticmethod
    def forward(ctx, min_radius: float, max_radius: float, sample_num: int, xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
        """
        Args:
            min_radius (float): minimum radius of the balls.
            max_radius (float): maximum radius of the balls.
            sample_num (int): maximum number of features in the balls.
            xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            center_xyz (torch.Tensor): (B, npoint, 3) centers of the ball
                query.

        Returns:
            torch.Tensor: (B, npoint, nsample) tensor with the indices of the
            features that form the query balls.
        """
        assert center_xyz.is_contiguous()
        assert xyz.is_contiguous()
        assert (min_radius < max_radius)
        (B, N, _) = xyz.size()
        npoint = center_xyz.size(1)
        idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)
        ext_module.ball_query_forward(center_xyz, xyz, idx, b=B, n=N, m=npoint, min_radius=min_radius, max_radius=max_radius, nsample=sample_num)
        if (torch.__version__ != 'parrots'):
            # Indices are integral; mark them non-differentiable.
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # No gradient flows through a ball query.
        return (None, None, None, None)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
    """Calculate overlap between two set of bboxes.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    Args:
        bboxes1 (torch.Tensor): shape (m, 4) in <x1, y1, x2, y2> format or
            empty.
        bboxes2 (torch.Tensor): shape (n, 4) in <x1, y1, x2, y2> format or
            empty. If aligned is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union) or "iof" (intersection
            over foreground).
        aligned (bool): elementwise (True) or pairwise (False) computation.
        offset (int): 0 or 1, added to box extents by the kernel.

    Returns:
        torch.Tensor: Return the ious betweens boxes. If ``aligned`` is
        ``False``, the shape of ious is (m, n) else (m, 1).

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> bbox_overlaps(bboxes1, bboxes2)
        tensor([[0.5000, 0.0000, 0.0000],
                [0.0000, 0.0000, 1.0000],
                [0.0000, 0.0000, 0.0000]])

    Example:
        >>> empty = torch.FloatTensor([])
        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    mode_dict = {'iou': 0, 'iof': 1}
    assert (mode in mode_dict.keys())
    mode_flag = mode_dict[mode]
    assert ((bboxes1.size((- 1)) == 4) or (bboxes1.size(0) == 0))
    assert ((bboxes2.size((- 1)) == 4) or (bboxes2.size(0) == 0))
    assert ((offset == 1) or (offset == 0))
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if aligned:
        assert (rows == cols)
    # Empty input short-circuit: return a correctly-shaped empty tensor.
    if ((rows * cols) == 0):
        return (bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols))
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        ious = bboxes1.new_zeros((rows, cols))
    ext_module.bbox_overlaps(bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned, offset=offset)
    return ious
class BorderAlignFunction(Function):
    """Autograd function backing border-align pooling (see BorderAlign)."""

    @staticmethod
    def symbolic(g, input, boxes, pool_size):
        # ONNX export via the mmcv custom op.
        return g.op('mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)

    @staticmethod
    def forward(ctx, input, boxes, pool_size):
        ctx.pool_size = pool_size
        ctx.input_shape = input.size()
        assert (boxes.ndim == 3), 'boxes must be with shape [B, H*W, 4]'
        assert (boxes.size(2) == 4), 'the last dimension of boxes must be (x1, y1, x2, y2)'
        assert ((input.size(1) % 4) == 0), 'the channel for input feature must be divisible by factor 4'
        # [B, C//4, H*W, 4]: one pooled value per border (top/left/bottom/right).
        output_shape = (input.size(0), (input.size(1) // 4), boxes.size(1), 4)
        output = input.new_zeros(output_shape)
        # argmax positions are kept so backward can route gradients.
        argmax_idx = input.new_zeros(output_shape).to(torch.int)
        ext_module.border_align_forward(input, boxes, output, argmax_idx, pool_size=ctx.pool_size)
        ctx.save_for_backward(boxes, argmax_idx)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (boxes, argmax_idx) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        # The extension requires contiguous gradients.
        grad_output = grad_output.contiguous()
        ext_module.border_align_backward(grad_output, boxes, argmax_idx, grad_input, pool_size=ctx.pool_size)
        return (grad_input, None, None)
class BorderAlign(nn.Module):
    """Border align pooling layer.

    Applies border_align over the input feature based on predicted bboxes.
    Described in `BorderDet: Border Feature for Dense Object Detection
    <https://arxiv.org/abs/2007.11056>`_.

    For each border line (top, left, bottom or right) of each box it
    uniformly samples ``pool_size`` + 1 positions (endpoints included),
    reads the features there via bilinear interpolation, and max-pools
    them into a single value per border.

    Args:
        pool_size (int): number of positions sampled over each box border
            (top, bottom, left, right).
    """

    def __init__(self, pool_size):
        super(BorderAlign, self).__init__()
        self.pool_size = pool_size

    def forward(self, input, boxes):
        """Pool border features.

        Args:
            input: Features with shape [N,4C,H,W]. Channels ranged in [0,C),
                [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom,
                right features respectively.
            boxes: Boxes with shape [N,H*W,4]. Coordinate format (x1,y1,x2,y2).

        Returns:
            torch.Tensor: Pooled features with shape [N,C,H*W,4]. The order is
            (top,left,bottom,right) for the last dimension.
        """
        return border_align(input, boxes, self.pool_size)

    def __repr__(self):
        return f'{self.__class__.__name__}(pool_size={self.pool_size})'
def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False, clockwise=True):
    """Return intersection-over-union (Jaccard index) of rotated boxes.

    Both sets of boxes are expected to be in
    (x_center, y_center, width, height, angle) format, with the angle in
    radian.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    .. note::
        The operator assumes:

        1) The positive direction along x axis is left -> right.

        2) The positive direction along y axis is top -> down.

        3) The w border is in parallel with x axis when angle = 0.

        There are 2 opposite definitions of the positive angular direction,
        clockwise (CW) and counter-clockwise (CCW). MMCV supports both and
        uses CW by default; set ``clockwise=False`` for the CCW definition.
        (In the CW system the rotation matrix is
        [[cos a, -sin a], [sin a, cos a]]; in the CCW system the off-diagonal
        signs are swapped.)

    Args:
        bboxes1 (torch.Tensor): rotated bboxes 1. It has shape (N, 5),
            indicating (x, y, w, h, theta) for each row, theta in radian.
        bboxes2 (torch.Tensor): rotated bboxes 2. It has shape (M, 5),
            indicating (x, y, w, h, theta) for each row, theta in radian.
        mode (str): "iou" (intersection over union) or "iof" (intersection
            over foreground).
        aligned (bool): elementwise (True) or pairwise (False) computation.
        clockwise (bool): flag indicating whether the positive angular
            orientation is clockwise. default True.
            `New in version 1.4.3.`

    Returns:
        torch.Tensor: Return the ious betweens boxes. If ``aligned`` is
        ``False``, the shape of ious is (N, M) else (N,).
    """
    assert (mode in ['iou', 'iof'])
    mode_dict = {'iou': 0, 'iof': 1}
    mode_flag = mode_dict[mode]
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        # Flat buffer; reshaped to (rows, cols) after the kernel runs.
        ious = bboxes1.new_zeros((rows * cols))
    if (not clockwise):
        # Negate the angle column to convert CCW boxes to the CW convention
        # expected by the extension kernel.
        flip_mat = bboxes1.new_ones(bboxes1.shape[(- 1)])
        flip_mat[(- 1)] = (- 1)
        bboxes1 = (bboxes1 * flip_mat)
        bboxes2 = (bboxes2 * flip_mat)
    bboxes1 = bboxes1.contiguous()
    bboxes2 = bboxes2.contiguous()
    ext_module.box_iou_rotated(bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)
    if (not aligned):
        ious = ious.view(rows, cols)
    return ious
class CARAFENaiveFunction(Function):
    """Autograd function for the naive (unoptimized) CARAFE upsampler."""

    @staticmethod
    def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
        # ONNX export via the mmcv custom op.
        return g.op('mmcv::MMCVCARAFENaive', features, masks, kernel_size_i=kernel_size, group_size_i=group_size, scale_factor_f=scale_factor)

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # Masks carry one reassembly kernel per group at upsampled resolution.
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # Kernel must be odd-sized.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        ext_module.carafe_naive_forward(features, masks, output, kernel_size=kernel_size, group_size=group_size, scale_factor=scale_factor)
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # Backward is only implemented by the CUDA extension.
        assert grad_output.is_cuda
        (features, masks) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        grad_input = torch.zeros_like(features)
        grad_masks = torch.zeros_like(masks)
        ext_module.carafe_naive_backward(grad_output.contiguous(), features, masks, grad_input, grad_masks, kernel_size=kernel_size, group_size=group_size, scale_factor=scale_factor)
        return (grad_input, grad_masks, None, None, None)
class CARAFENaive(Module):
    """Naive CARAFE upsampling module wrapping ``carafe_naive``."""

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFENaive, self).__init__()
        for arg in (kernel_size, group_size, scale_factor):
            assert isinstance(arg, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        """Upsample ``features`` guided by reassembly ``masks``."""
        return carafe_naive(features, masks, self.kernel_size,
                            self.group_size, self.scale_factor)
class CARAFEFunction(Function):
    """Autograd binding for the optimized CARAFE CUDA kernels.

    Unlike the naive variant, the optimized kernels use extra rearranged
    scratch buffers (``rfeatures``/``routput``/``rmasks``) internally.
    """

    @staticmethod
    def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
        # ONNX export: emit the custom mmcv op with matching attributes.
        return g.op(
            'mmcv::MMCVCARAFE',
            features,
            masks,
            kernel_size_i=kernel_size,
            group_size_i=group_size,
            scale_factor_f=scale_factor)

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        """Upsample ``features`` (N, C, H, W) by ``scale_factor`` guided by
        the per-location reassembly kernels in ``masks``."""
        assert (scale_factor >= 1)
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((-1)) == (features.size((-1)) * scale_factor))
        assert (masks.size((-2)) == (features.size((-2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # Kernel must be odd (and positive) so it has a well-defined center.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        # Rearranged scratch buffers consumed by the optimized kernels.
        routput = features.new_zeros(output.size(), requires_grad=False)
        rfeatures = features.new_zeros(features.size(), requires_grad=False)
        rmasks = masks.new_zeros(masks.size(), requires_grad=False)
        ext_module.carafe_forward(
            features,
            masks,
            rfeatures,
            routput,
            rmasks,
            output,
            kernel_size=kernel_size,
            group_size=group_size,
            scale_factor=scale_factor)
        # rfeatures is reused by backward, so save it alongside the inputs.
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks, rfeatures)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # The backward kernel is CUDA-only.
        assert grad_output.is_cuda
        (features, masks, rfeatures) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        # Rearranged gradient scratch buffers for the optimized kernels.
        rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input = torch.zeros_like(features, requires_grad=False)
        rgrad_masks = torch.zeros_like(masks, requires_grad=False)
        grad_input = torch.zeros_like(features, requires_grad=False)
        grad_masks = torch.zeros_like(masks, requires_grad=False)
        ext_module.carafe_backward(
            grad_output.contiguous(),
            rfeatures,
            masks,
            rgrad_output,
            rgrad_input_hs,
            rgrad_input,
            rgrad_masks,
            grad_input,
            grad_masks,
            kernel_size=kernel_size,
            group_size=group_size,
            scale_factor=scale_factor)
        # No gradients for the three int hyper-parameters.
        return (grad_input, grad_masks, None, None, None)
class CARAFE(Module):
    """CARAFE: Content-Aware ReAssembly of FEatures

    Please refer to `CARAFE: Content-Aware ReAssembly of FEatures
    <https://arxiv.org/abs/1905.02188>`_ for more details.

    Args:
        kernel_size (int): reassemble kernel size
        group_size (int): reassemble group size
        scale_factor (int): upsample ratio

    Returns:
        upsampled feature map
    """

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFE, self).__init__()
        # All three hyper-parameters must be plain ints.
        for value in (kernel_size, group_size, scale_factor):
            assert isinstance(value, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        return carafe(features, masks, self.kernel_size, self.group_size,
                      self.scale_factor)
@UPSAMPLE_LAYERS.register_module(name='carafe')
class CARAFEPack(nn.Module):
    """A unified package of CARAFE upsampler that contains: 1) channel
    compressor 2) content encoder 3) CARAFE op.

    Official implementation of ICCV 2019 paper
    `CARAFE: Content-Aware ReAssembly of FEatures
    <https://arxiv.org/abs/1905.02188>`_.

    Args:
        channels (int): input feature channels
        scale_factor (int): upsample ratio
        up_kernel (int): kernel size of CARAFE op
        up_group (int): group size of CARAFE op
        encoder_kernel (int): kernel size of content encoder
        encoder_dilation (int): dilation of content encoder
        compressed_channels (int): output channels of channels compressor

    Returns:
        upsampled feature map
    """

    def __init__(self,
                 channels,
                 scale_factor,
                 up_kernel=5,
                 up_group=1,
                 encoder_kernel=3,
                 encoder_dilation=1,
                 compressed_channels=64):
        super(CARAFEPack, self).__init__()
        self.channels = channels
        self.scale_factor = scale_factor
        self.up_kernel = up_kernel
        self.up_group = up_group
        self.encoder_kernel = encoder_kernel
        self.encoder_dilation = encoder_dilation
        self.compressed_channels = compressed_channels
        # 1x1 conv reduces channels before predicting reassembly kernels.
        self.channel_compressor = nn.Conv2d(channels, self.compressed_channels, 1)
        # Predicts up_kernel^2 * up_group kernels per upsampled location;
        # the scale_factor^2 factor is unfolded later by pixel_shuffle.
        self.content_encoder = nn.Conv2d(
            self.compressed_channels,
            ((((self.up_kernel * self.up_kernel) * self.up_group) * self.scale_factor) * self.scale_factor),
            self.encoder_kernel,
            padding=int((((self.encoder_kernel - 1) * self.encoder_dilation) / 2)),
            dilation=self.encoder_dilation,
            groups=1)
        self.init_weights()

    def init_weights(self):
        # Xavier for all convs, then overwrite the encoder with a small
        # normal init so initial kernels are near-uniform after softmax.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
        normal_init(self.content_encoder, std=0.001)

    def kernel_normalizer(self, mask):
        """Rearrange encoder output to full resolution and softmax-normalize
        each reassembly kernel over its up_kernel^2 support."""
        mask = F.pixel_shuffle(mask, self.scale_factor)
        (n, mask_c, h, w) = mask.size()
        mask_channel = int((mask_c / float((self.up_kernel ** 2))))
        mask = mask.view(n, mask_channel, (-1), h, w)
        # NOTE(review): softmax runs in the mask's own dtype; under fp16 this
        # may lose precision — confirm intended for mixed-precision training.
        mask = F.softmax(mask, dim=2, dtype=mask.dtype)
        mask = mask.view(n, mask_c, h, w).contiguous()
        return mask

    def feature_reassemble(self, x, mask):
        # Apply the CARAFE op with the normalized kernels.
        x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)
        return x

    def forward(self, x):
        compressed_x = self.channel_compressor(x)
        mask = self.content_encoder(compressed_x)
        mask = self.kernel_normalizer(mask)
        x = self.feature_reassemble(x, mask)
        return x
def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area,
                   kernel_num):
    """Expand kernel contours so that foreground pixels are assigned into
    instances.

    Args:
        kernel_mask (np.array or torch.Tensor): The instance kernel mask with
            size hxw.
        internal_kernel_label (np.array or torch.Tensor): The instance
            internal kernel label with size hxw.
        min_kernel_area (int): The minimum kernel area.
        kernel_num (int): The instance kernel number.

    Returns:
        list: The instance index map with size hxw.
    """
    assert isinstance(min_kernel_area, int)
    assert isinstance(kernel_num, int)
    # Accept numpy arrays or tensors; normalize both inputs to tensors.
    converted = []
    for arr in (kernel_mask, internal_kernel_label):
        assert isinstance(arr, (torch.Tensor, np.ndarray))
        converted.append(torch.from_numpy(arr) if isinstance(arr, np.ndarray) else arr)
    kernel_mask, internal_kernel_label = converted

    if torch.__version__ == 'parrots':
        # parrots kernels cannot handle empty inputs; short-circuit them.
        empty = (kernel_mask.shape[0] == 0
                 or internal_kernel_label.shape[0] == 0)
        if empty:
            label = []
        else:
            label = ext_module.contour_expand(
                kernel_mask,
                internal_kernel_label,
                min_kernel_area=min_kernel_area,
                kernel_num=kernel_num)
            label = label.tolist()
    else:
        label = ext_module.contour_expand(kernel_mask, internal_kernel_label,
                                          min_kernel_area, kernel_num)
    return label
class TopPoolFunction(Function):
    """Autograd wrapper around the top-direction corner pooling kernels."""

    @staticmethod
    def symbolic(g, input):
        return g.op(
            'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.top_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        return ext_module.top_pool_backward(saved, grad_output)
class BottomPoolFunction(Function):
    """Autograd wrapper around the bottom-direction corner pooling kernels."""

    @staticmethod
    def symbolic(g, input):
        return g.op(
            'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.bottom_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        return ext_module.bottom_pool_backward(saved, grad_output)
class LeftPoolFunction(Function):
    """Autograd wrapper around the left-direction corner pooling kernels."""

    @staticmethod
    def symbolic(g, input):
        return g.op(
            'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.left_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        return ext_module.left_pool_backward(saved, grad_output)
class RightPoolFunction(Function):
    """Autograd wrapper around the right-direction corner pooling kernels."""

    @staticmethod
    def symbolic(g, input):
        return g.op(
            'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.right_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        return ext_module.right_pool_backward(saved, grad_output)
class CornerPool(nn.Module):
    """Corner Pooling.

    Corner Pooling is a new type of pooling layer that helps a
    convolutional network better localize corners of bounding boxes.

    Please refer to `CornerNet: Detecting Objects as Paired Keypoints
    <https://arxiv.org/abs/1808.01244>`_ for more details.

    Code is modified from https://github.com/princeton-vl/CornerNet-Lite.

    Args:
        mode (str): Pooling orientation for the pooling layer

            - 'bottom': Bottom Pooling
            - 'left': Left Pooling
            - 'right': Right Pooling
            - 'top': Top Pooling

    Returns:
        Feature map after pooling.
    """

    pool_functions = {
        'bottom': BottomPoolFunction,
        'left': LeftPoolFunction,
        'right': RightPoolFunction,
        'top': TopPoolFunction,
    }

    # For each mode: (dimension to cummax over, whether to flip first).
    cummax_dim_flip = {
        'bottom': (2, False),
        'left': (3, True),
        'right': (3, False),
        'top': (2, True),
    }

    def __init__(self, mode):
        super(CornerPool, self).__init__()
        assert (mode in self.pool_functions)
        self.mode = mode
        self.corner_pool = self.pool_functions[mode]

    @staticmethod
    def _torch_version_tuple():
        """Parse the leading numeric components of ``torch.__version__``.

        A plain string comparison such as ``torch.__version__ >= '1.5.0'``
        is wrong for versions like '1.10.0' (lexicographically smaller than
        '1.5.0'), so compare integer tuples instead. Local suffixes such as
        '+cu113' are stripped before parsing.
        """
        parts = []
        for piece in torch.__version__.split('+')[0].split('.'):
            if not piece.isdigit():
                break
            parts.append(int(piece))
        return tuple(parts)

    def forward(self, x):
        if (torch.__version__ != 'parrots'
                and self._torch_version_tuple() >= (1, 5)):
            if torch.onnx.is_in_onnx_export():
                assert self._torch_version_tuple() >= (1, 7), "When `cummax` serves as an intermediate component whose outputs is used as inputs for another modules, it's expected that pytorch version must be >= 1.7.0, otherwise Error appears like: `RuntimeError: tuple appears in op that does not forward tuples, unsupported kind: prim::PythonOp`."
            # Native implementation: corner pooling is a running max, i.e. a
            # (possibly flipped) cummax along the mode's spatial dimension.
            (dim, flip) = self.cummax_dim_flip[self.mode]
            if flip:
                x = x.flip(dim)
            (pool_tensor, _) = torch.cummax(x, dim=dim)
            if flip:
                pool_tensor = pool_tensor.flip(dim)
            return pool_tensor
        else:
            # Fallback to the compiled CUDA op on old PyTorch / parrots.
            return self.corner_pool.apply(x)
class CorrelationFunction(Function):
    """Autograd binding for the correlation (cost volume) CUDA kernels."""

    @staticmethod
    def forward(ctx,
                input1,
                input2,
                kernel_size=1,
                max_displacement=1,
                stride=1,
                padding=1,
                dilation=1,
                dilation_patch=1):
        # Save inputs and stash every hyper-parameter on ctx for backward.
        ctx.save_for_backward(input1, input2)
        (kH, kW) = ctx.kernel_size = _pair(kernel_size)
        # Displacements are sampled on a (2d + 1) x (2d + 1) grid.
        patch_size = ((max_displacement * 2) + 1)
        ctx.patch_size = patch_size
        (dH, dW) = ctx.stride = _pair(stride)
        (padH, padW) = ctx.padding = _pair(padding)
        (dilationH, dilationW) = ctx.dilation = _pair(dilation)
        (dilation_patchH, dilation_patchW) = ctx.dilation_patch = _pair(dilation_patch)
        # Output: (B, patch, patch, oH, oW).
        output_size = CorrelationFunction._output_size(ctx, input1)
        output = input1.new_zeros(output_size)
        ext_module.correlation_forward(
            input1,
            input2,
            output,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input1, input2) = ctx.saved_tensors
        (kH, kW) = ctx.kernel_size
        patch_size = ctx.patch_size
        (padH, padW) = ctx.padding
        (dilationH, dilationW) = ctx.dilation
        (dilation_patchH, dilation_patchW) = ctx.dilation_patch
        (dH, dW) = ctx.stride
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)
        ext_module.correlation_backward(
            grad_output,
            input1,
            input2,
            grad_input1,
            grad_input2,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)
        # No gradients for the six int hyper-parameters.
        return (grad_input1, grad_input2, None, None, None, None, None, None)

    @staticmethod
    def _output_size(ctx, input1):
        # Standard conv output-size arithmetic with dilated kernels.
        (iH, iW) = (input1.size(2), input1.size(3))
        batch_size = input1.size(0)
        (kH, kW) = ctx.kernel_size
        patch_size = ctx.patch_size
        (dH, dW) = ctx.stride
        (padH, padW) = ctx.padding
        (dilationH, dilationW) = ctx.dilation
        dilatedKH = (((kH - 1) * dilationH) + 1)
        dilatedKW = (((kW - 1) * dilationW) + 1)
        oH = int(((((iH + (2 * padH)) - dilatedKH) / dH) + 1))
        oW = int(((((iW + (2 * padW)) - dilatedKW) / dW) + 1))
        output_size = (batch_size, patch_size, patch_size, oH, oW)
        return output_size
class Correlation(nn.Module):
    r"""Correlation operator.

    This correlation operator works for optical flow correlation computation.

    Given two batched tensors of shape :math:`(N, C, H, W)`, the output has
    shape :math:`(N, max\_displacement \times 2 + 1, max\_displacement \times
    2 + 1, H_{out}, W_{out})`, where

    .. math::
        H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding -
            dilation \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    .. math::
        W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation
            \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    Each correlation item :math:`(N_i, dy, dx)` is the valid sliding-window
    convolution between ``input1`` and ``input2`` shifted by
    :math:`dy, dx \in [-max\_displacement \times dilation\_patch,
    max\_displacement \times dilation\_patch]` (with zero padding at the
    margins).

    Args:
        kernel_size (int): The size of sliding window i.e. local neighborhood
            representing the center points and involved in correlation
            computation. Defaults to 1.
        max_displacement (int): The radius for computing correlation volume,
            but the actual working space can be dilated by dilation_patch.
            Defaults to 1.
        stride (int): The stride of the sliding blocks in the input spatial
            dimensions. Defaults to 1.
        padding (int): Zero padding added to all four sides of the input1.
            Defaults to 0.
        dilation (int): The spacing of local neighborhood that will involved
            in correlation. Defaults to 1.
        dilation_patch (int): The spacing between position need to compute
            correlation. Defaults to 1.
    """

    def __init__(self,
                 kernel_size: int = 1,
                 max_displacement: int = 1,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 dilation_patch: int = 1) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.dilation_patch = dilation_patch

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        return CorrelationFunction.apply(input1, input2, self.kernel_size,
                                         self.max_displacement, self.stride,
                                         self.padding, self.dilation,
                                         self.dilation_patch)

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}'
                f'(kernel_size={self.kernel_size}, '
                f'max_displacement={self.max_displacement}, '
                f'stride={self.stride}, '
                f'padding={self.padding}, '
                f'dilation={self.dilation}, '
                f'dilation_patch={self.dilation_patch})')
class DeformRoIPoolFunction(Function):
    """Autograd binding for the deformable RoI pooling CUDA kernels."""

    @staticmethod
    def symbolic(g, input, rois, offset, output_size, spatial_scale,
                 sampling_ratio, gamma):
        # ONNX export: emit the custom mmcv op with matching attributes.
        return g.op(
            'mmcv::MMCVDeformRoIPool',
            input,
            rois,
            offset,
            pooled_height_i=output_size[0],
            pooled_width_i=output_size[1],
            spatial_scale_f=spatial_scale,
            sampling_ratio_f=sampling_ratio,
            gamma_f=gamma)

    @staticmethod
    def forward(ctx, input, rois, offset, output_size, spatial_scale=1.0,
                sampling_ratio=0, gamma=0.1):
        # An empty offset tensor signals "no deformation" to the kernel.
        if (offset is None):
            offset = input.new_zeros(0)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = float(spatial_scale)
        ctx.sampling_ratio = int(sampling_ratio)
        ctx.gamma = float(gamma)
        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1])
        output = input.new_zeros(output_shape)
        ext_module.deform_roi_pool_forward(
            input,
            rois,
            offset,
            output,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            gamma=ctx.gamma)
        ctx.save_for_backward(input, rois, offset)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, rois, offset) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(input.shape)
        grad_offset = grad_output.new_zeros(offset.shape)
        ext_module.deform_roi_pool_backward(
            grad_output,
            input,
            rois,
            offset,
            grad_input,
            grad_offset,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            gamma=ctx.gamma)
        # Empty grad_offset means forward received offset=None.
        if (grad_offset.numel() == 0):
            grad_offset = None
        return (grad_input, None, grad_offset, None, None, None, None)
class DeformRoIPool(nn.Module):
    """Deformable RoI pooling module wrapping :func:`deform_roi_pool`.

    Args:
        output_size (int or tuple): pooled output size (h, w).
        spatial_scale (float): scale mapping input coords to RoI coords.
        sampling_ratio (int): sampling points per bin (0 = adaptive).
        gamma (float): offset magnitude scaling factor.
    """

    def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0,
                 gamma=0.1):
        super(DeformRoIPool, self).__init__()
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.gamma = float(gamma)

    def forward(self, input, rois, offset=None):
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio,
                               self.gamma)
class DeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with a learned offset branch.

    A first (offset-free) pooling pass produces features from which a small
    MLP predicts per-bin offsets; a second pass pools with those offsets.
    """

    def __init__(self, output_size, output_channels, deform_fc_channels=1024,
                 spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
        self.output_channels = output_channels
        self.deform_fc_channels = deform_fc_channels
        # MLP predicting 2 offsets (dx, dy) per output bin.
        self.offset_fc = nn.Sequential(
            nn.Linear(((self.output_size[0] * self.output_size[1]) * self.output_channels), self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, ((self.output_size[0] * self.output_size[1]) * 2)))
        # Zero-init the last layer so training starts from zero offsets.
        self.offset_fc[(-1)].weight.data.zero_()
        self.offset_fc[(-1)].bias.data.zero_()

    def forward(self, input, rois):
        assert (input.size(1) == self.output_channels)
        # Pass 1: pool without offsets to get features for offset prediction.
        x = deform_roi_pool(input, rois, None, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, (-1)))
        offset = offset.view(rois_num, 2, self.output_size[0], self.output_size[1])
        # Pass 2: pool again using the predicted offsets.
        return deform_roi_pool(input, rois, offset, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
class ModulatedDeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with learned offsets and a modulation mask.

    Like :class:`DeformRoIPoolPack` but additionally predicts a sigmoid
    mask that reweights the pooled output element-wise.
    """

    def __init__(self, output_size, output_channels, deform_fc_channels=1024,
                 spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(ModulatedDeformRoIPoolPack, self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
        self.output_channels = output_channels
        self.deform_fc_channels = deform_fc_channels
        # MLP predicting 2 offsets (dx, dy) per output bin.
        self.offset_fc = nn.Sequential(
            nn.Linear(((self.output_size[0] * self.output_size[1]) * self.output_channels), self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, ((self.output_size[0] * self.output_size[1]) * 2)))
        # Zero-init the last layer so training starts from zero offsets.
        self.offset_fc[(-1)].weight.data.zero_()
        self.offset_fc[(-1)].bias.data.zero_()
        # MLP predicting one modulation scalar per output bin, in (0, 1).
        self.mask_fc = nn.Sequential(
            nn.Linear(((self.output_size[0] * self.output_size[1]) * self.output_channels), self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, ((self.output_size[0] * self.output_size[1]) * 1)),
            nn.Sigmoid())
        # Zero-init the Linear before the Sigmoid: initial mask is 0.5.
        self.mask_fc[2].weight.data.zero_()
        self.mask_fc[2].bias.data.zero_()

    def forward(self, input, rois):
        assert (input.size(1) == self.output_channels)
        # Pass 1: pool without offsets to get features for prediction.
        x = deform_roi_pool(input, rois, None, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, (-1)))
        offset = offset.view(rois_num, 2, self.output_size[0], self.output_size[1])
        mask = self.mask_fc(x.view(rois_num, (-1)))
        mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1])
        # Pass 2: pool with predicted offsets, then modulate by the mask.
        d = deform_roi_pool(input, rois, offset, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
        return (d * mask)
class Conv2d_deprecated(Conv2d):
    """Deprecated alias kept for backward compatibility; emits a warning."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in '
            'the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning)
class ConvTranspose2d_deprecated(ConvTranspose2d):
    """Deprecated alias kept for backward compatibility; emits a warning."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing ConvTranspose2d wrapper from "mmcv.ops" will be '
            'deprecated in the future. Please import them from "mmcv.cnn" '
            'instead', DeprecationWarning)
class MaxPool2d_deprecated(MaxPool2d):
    """Deprecated alias kept for backward compatibility; emits a warning."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated '
            'in the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning)
class Linear_deprecated(Linear):
    """Deprecated alias kept for backward compatibility; emits a warning."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing Linear wrapper from "mmcv.ops" will be deprecated in '
            'the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning)
class SigmoidFocalLossFunction(Function):
    """Autograd binding for the sigmoid focal loss CUDA kernels."""

    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export: emit the custom mmcv op with matching attributes.
        return g.op(
            'mmcv::MMCVSigmoidFocalLoss',
            input,
            target,
            gamma_f=gamma,
            alpha_f=alpha,
            weight_f=weight,
            reduction_s=reduction)

    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25, weight=None,
                reduction='mean'):
        # input: (N, num_classes) logits; target: (N,) int64 class indices.
        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert (input.dim() == 2)
        assert (target.dim() == 1)
        assert (input.size(0) == target.size(0))
        # An empty weight tensor signals "no class weighting" to the kernel.
        if (weight is None):
            weight = input.new_empty(0)
        else:
            assert (weight.dim() == 1)
            assert (input.size(1) == weight.size(0))
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert (reduction in ctx.reduction_dict.keys())
        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]
        output = input.new_zeros(input.size())
        ext_module.sigmoid_focal_loss_forward(
            input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
        # Reduction happens here, on top of the per-element kernel output.
        if (ctx.reduction == ctx.reduction_dict['mean']):
            output = (output.sum() / input.size(0))
        elif (ctx.reduction == ctx.reduction_dict['sum']):
            output = output.sum()
        ctx.save_for_backward(input, target, weight)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, target, weight) = ctx.saved_tensors
        grad_input = input.new_zeros(input.size())
        ext_module.sigmoid_focal_loss_backward(
            input, target, weight, grad_input, gamma=ctx.gamma, alpha=ctx.alpha)
        grad_input *= grad_output
        # Undo the mean reduction applied in forward.
        if (ctx.reduction == ctx.reduction_dict['mean']):
            grad_input /= input.size(0)
        return (grad_input, None, None, None, None, None)
class SigmoidFocalLoss(nn.Module):
    """Module wrapper of :func:`sigmoid_focal_loss`.

    Args:
        gamma (float): focusing parameter.
        alpha (float): balancing parameter.
        weight (torch.Tensor, optional): per-class weights, registered as a
            buffer.
        reduction (str): 'none', 'mean' or 'sum'.
    """

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        return sigmoid_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
class SoftmaxFocalLossFunction(Function):
    """Autograd binding for the softmax focal loss CUDA kernels.

    The softmax itself is computed in Python (numerically stabilized);
    the kernels consume the resulting probabilities.
    """

    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export: emit the custom mmcv op with matching attributes.
        return g.op(
            'mmcv::MMCVSoftmaxFocalLoss',
            input,
            target,
            gamma_f=gamma,
            alpha_f=alpha,
            weight_f=weight,
            reduction_s=reduction)

    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25, weight=None,
                reduction='mean'):
        # input: (N, num_classes) logits; target: (N,) int64 class indices.
        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert (input.dim() == 2)
        assert (target.dim() == 1)
        assert (input.size(0) == target.size(0))
        # An empty weight tensor signals "no class weighting" to the kernel.
        if (weight is None):
            weight = input.new_empty(0)
        else:
            assert (weight.dim() == 1)
            assert (input.size(1) == weight.size(0))
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert (reduction in ctx.reduction_dict.keys())
        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]
        # Numerically stable softmax: subtract the row max before exp.
        (channel_stats, _) = torch.max(input, dim=1)
        input_softmax = (input - channel_stats.unsqueeze(1).expand_as(input))
        input_softmax.exp_()
        channel_stats = input_softmax.sum(dim=1)
        input_softmax /= channel_stats.unsqueeze(1).expand_as(input)
        output = input.new_zeros(input.size(0))
        ext_module.softmax_focal_loss_forward(
            input_softmax, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
        # Reduction happens here, on top of the per-sample kernel output.
        if (ctx.reduction == ctx.reduction_dict['mean']):
            output = (output.sum() / input.size(0))
        elif (ctx.reduction == ctx.reduction_dict['sum']):
            output = output.sum()
        ctx.save_for_backward(input_softmax, target, weight)
        return output

    # NOTE(review): unlike SigmoidFocalLossFunction, backward is not marked
    # @once_differentiable — confirm whether this asymmetry is intentional.
    @staticmethod
    def backward(ctx, grad_output):
        (input_softmax, target, weight) = ctx.saved_tensors
        buff = input_softmax.new_zeros(input_softmax.size(0))
        grad_input = input_softmax.new_zeros(input_softmax.size())
        ext_module.softmax_focal_loss_backward(
            input_softmax, target, weight, buff, grad_input,
            gamma=ctx.gamma, alpha=ctx.alpha)
        grad_input *= grad_output
        # Undo the mean reduction applied in forward.
        if (ctx.reduction == ctx.reduction_dict['mean']):
            grad_input /= input_softmax.size(0)
        return (grad_input, None, None, None, None, None)
class SoftmaxFocalLoss(nn.Module):
    """Module wrapper of :func:`softmax_focal_loss`.

    Args:
        gamma (float): focusing parameter.
        alpha (float): balancing parameter.
        weight (torch.Tensor, optional): per-class weights, registered as a
            buffer.
        reduction (str): 'none', 'mean' or 'sum'.
    """

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SoftmaxFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        return softmax_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
class FurthestPointSampling(Function):
    """Uses iterative furthest point sampling to select a set of features
    whose corresponding points have the furthest distance."""

    @staticmethod
    def forward(ctx, points_xyz: torch.Tensor,
                num_points: int) -> torch.Tensor:
        """
        Args:
            points_xyz (torch.Tensor): (B, N, 3) where N > num_points.
            num_points (int): Number of points in the sampled set.

        Returns:
            torch.Tensor: (B, num_points) int32 indices of the sampled points.
        """
        assert points_xyz.is_contiguous()
        (B, N) = points_xyz.size()[:2]
        # Allocate on the same device as the input rather than via the
        # deprecated torch.cuda.IntTensor/FloatTensor constructors, which
        # always target the *current* CUDA device and would break when
        # points_xyz lives on a different GPU. This also matches the sibling
        # FurthestPointSamplingWithDist implementation.
        output = torch.zeros((B, num_points),
                             dtype=torch.int32,
                             device=points_xyz.device)
        # Running min-distance buffer, initialised to "infinity".
        temp = torch.full((B, N),
                          10000000000.0,
                          dtype=torch.float32,
                          device=points_xyz.device)
        ext_module.furthest_point_sampling_forward(
            points_xyz, temp, output, b=B, n=N, m=num_points)
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Sampled indices carry no gradient.
        return (None, None)
class FurthestPointSamplingWithDist(Function):
    """Uses iterative furthest point sampling to select a set of features
    whose corresponding points have the furthest distance."""

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor,
                num_points: int) -> torch.Tensor:
        """
        Args:
            points_dist (torch.Tensor): (B, N, N) Distance between each point
                pair.
            num_points (int): Number of points in the sampled set.

        Returns:
            torch.Tensor: (B, num_points) indices of the sampled points.
        """
        assert points_dist.is_contiguous()
        (B, N, _) = points_dist.size()
        output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
        # Running min-distance buffer, initialised to "infinity" (1e10).
        temp = points_dist.new_zeros([B, N]).fill_(10000000000.0)
        ext_module.furthest_point_sampling_with_dist_forward(
            points_dist, temp, output, b=B, n=N, m=num_points)
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Sampled indices carry no gradient.
        return (None, None)
class FusedBiasLeakyReLUFunctionBackward(Function):
    """Calculate second order deviation.

    This function is to compute the second order deviation for the fused leaky
    relu operation.
    """

    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        # The kernel's bias slot is unused on the gradient path.
        empty = grad_output.new_empty(0)
        # act=3, grad=1 selects the leaky-ReLU gradient kernel.
        grad_input = ext_module.fused_bias_leakyrelu(
            grad_output, empty, out, act=3, grad=1,
            alpha=negative_slope, scale=scale)
        # Bias gradient: reduce over every dimension except channel (dim 1).
        dim = [0]
        if (grad_input.ndim > 2):
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()
        return (grad_input, grad_bias)

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        (out,) = ctx.saved_tensors
        # Second-order pass reuses the same gradient kernel.
        gradgrad_out = ext_module.fused_bias_leakyrelu(
            gradgrad_input, gradgrad_bias.to(out.dtype), out,
            act=3, grad=1, alpha=ctx.negative_slope, scale=ctx.scale)
        return (gradgrad_out, None, None, None)
class FusedBiasLeakyReLUFunction(Function):
    """Autograd binding for the fused bias + leaky ReLU CUDA kernel."""

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        # The kernel's refer slot is unused on the forward path.
        empty = input.new_empty(0)
        # act=3, grad=0 selects the leaky-ReLU forward kernel.
        out = ext_module.fused_bias_leakyrelu(
            input, bias, empty, act=3, grad=0,
            alpha=negative_slope, scale=scale)
        # Only the activation output is needed to compute gradients.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        (out,) = ctx.saved_tensors
        (grad_input, grad_bias) = FusedBiasLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale)
        return (grad_input, grad_bias, None, None)
class FusedBiasLeakyReLU(nn.Module):
    r"""Fused bias leaky ReLU.

    This function is introduced in the StyleGAN2:
    `Analyzing and Improving the Image Quality of StyleGAN
    <http://arxiv.org/abs/1912.04958>`_

    The bias term comes from the convolution operation. In addition, to keep
    the variance of the feature map or gradients unchanged, they also adopt a
    scale similarly with Kaiming initialization. However, since the
    :math:`1+{alpha}^2` is too small, we can just ignore it. Therefore, the
    final scale is just :math:`\sqrt{2}`. Of course, you may change it with
    your own scale.

    TODO: Implement the CPU version.

    Args:
        channel (int): The channel number of the feature map.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the
            feature map. Defaults to 2**0.5.
    """

    def __init__(self, num_channels, negative_slope=0.2, scale=2 ** 0.5):
        super(FusedBiasLeakyReLU, self).__init__()
        # Learnable per-channel bias, initialised to zero.
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_bias_leakyrelu(input, self.bias, self.negative_slope,
                                    self.scale)
def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    r"""Fused bias leaky ReLU function.

    This function is introduced in the StyleGAN2:
    `Analyzing and Improving the Image Quality of StyleGAN
    <http://arxiv.org/abs/1912.04958>`_

    The bias term comes from the convolution operation. In addition, to keep
    the variance of the feature map or gradients unchanged, they also adopt a
    scale similarly with Kaiming initialization. However, since the
    :math:`1+{alpha}^2` is too small, we can just ignore it. Therefore, the
    final scale is just :math:`\sqrt{2}`. Of course, you may change it with
    your own scale.

    Args:
        input (torch.Tensor): Input feature map.
        bias (nn.Parameter): The bias from convolution operation.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the
            feature map. Defaults to 2**0.5.

    Returns:
        torch.Tensor: Feature map after non-linear activation.
    """
    # The fused kernel is CUDA-only; fall back to the pure-PyTorch
    # reference implementation on CPU tensors.
    if input.is_cuda:
        return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype),
                                                negative_slope, scale)
    return bias_leakyrelu_ref(input, bias, negative_slope, scale)
def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Pure-PyTorch reference of fused bias + leaky ReLU + scale.

    Args:
        x (torch.Tensor): input of shape (N, C, ...).
        bias (torch.Tensor or None): per-channel bias of shape (C,).
        negative_slope (float): leaky ReLU slope. Defaults to 0.2.
        scale (float): output scaling factor. Defaults to 2**0.5.
    """
    if bias is not None:
        assert bias.ndim == 1
        assert bias.shape[0] == x.shape[1]
        # Broadcast the bias along the channel dimension (dim 1).
        shape = [1] * x.ndim
        shape[1] = -1
        x = x + bias.reshape(shape)
    out = F.leaky_relu(x, negative_slope)
    return out * scale if scale != 1 else out
class GatherPoints(Function):
    """Gather points with given index."""

    @staticmethod
    def forward(ctx, features: torch.Tensor,
                indices: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): (B, C, N) features to gather.
            indices (torch.Tensor): (B, M) where M is the number of points.

        Returns:
            torch.Tensor: (B, C, M) where M is the number of points.
        """
        assert features.is_contiguous()
        assert indices.is_contiguous()
        (B, npoint) = indices.size()
        (_, C, N) = features.size()
        output = features.new_zeros((B, C, npoint))
        ext_module.gather_points_forward(
            features, indices, output, b=B, c=C, n=N, npoints=npoint)
        # Keep sizes + indices for scattering gradients back in backward.
        ctx.for_backwards = (indices, C, N)
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(indices)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        (idx, C, N) = ctx.for_backwards
        (B, npoint) = idx.size()
        grad_features = grad_out.new_zeros((B, C, N))
        grad_out_data = grad_out.data.contiguous()
        ext_module.gather_points_backward(
            grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint)
        # No gradient for the integer indices.
        return (grad_features, None)
def get_onnxruntime_op_path():
    """Return the path of the compiled ONNX Runtime custom-op library.

    Searches for a ``_ext_ort.*.so`` shared object in the package directory
    one level above this file.

    Returns:
        str: Path of the first matching library, or ``''`` if none exists.
    """
    package_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    matches = glob.glob(os.path.join(package_dir, '_ext_ort.*.so'))
    return matches[0] if matches else ''
def boxes_iou_bev(boxes_a, boxes_b):
    """Calculate boxes IoU in the Bird's Eye View.

    Args:
        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
        boxes_b (torch.Tensor): Input boxes b with shape (N, 5).

    Returns:
        torch.Tensor: IoU result with shape (M, N).
    """
    # Pairwise (M, N) output buffer on the same device/dtype as boxes_a,
    # filled in place by the extension kernel.
    ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
    ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
    return ans_iou
def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
    """NMS function GPU implementation (for BEV boxes).

    The overlap of two boxes for IoU calculation is defined as the exact
    overlapping area of the two boxes. In this function, one can also set
    ``pre_max_size`` and ``post_max_size``.

    Args:
        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
            ([x1, y1, x2, y2, ry]).
        scores (torch.Tensor): Scores of boxes with the shape of [N].
        thresh (float): Overlap threshold of NMS.
        pre_max_size (int, optional): Max size of boxes before NMS.
            Default: None.
        post_max_size (int, optional): Max size of boxes after NMS.
            Default: None.

    Returns:
        torch.Tensor: Indexes after NMS.
    """
    assert (boxes.size(1) == 5), 'Input boxes shape should be [N, 5]'
    # Sort by score (descending) so the kernel keeps the highest-scoring box
    # of each overlapping group.
    order = scores.sort(0, descending=True)[1]
    if (pre_max_size is not None):
        order = order[:pre_max_size]
    boxes = boxes[order].contiguous()
    # keep / num_out are output buffers filled in place by the extension op.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = torch.zeros(size=(), dtype=torch.long)
    ext_module.iou3d_nms_forward(boxes, keep, num_out, nms_overlap_thresh=thresh)
    # Map the kept (score-sorted) positions back to indices into the
    # original input ordering.
    keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
    if (post_max_size is not None):
        keep = keep[:post_max_size]
    return keep
def nms_normal_bev(boxes, scores, thresh):
    """Normal NMS function GPU implementation (for BEV boxes).

    The overlap of two boxes for IoU calculation is defined as the exact
    overlapping area of the two boxes WITH their yaw angle set to 0.

    Args:
        boxes (torch.Tensor): Input boxes with shape (N, 5).
        scores (torch.Tensor): Scores of predicted boxes with shape (N).
        thresh (float): Overlap threshold of NMS.

    Returns:
        torch.Tensor: Remaining indices with scores in descending order.
    """
    assert (boxes.shape[1] == 5), 'Input boxes shape should be [N, 5]'
    # Process boxes in descending score order.
    order = scores.sort(0, descending=True)[1]
    boxes = boxes[order].contiguous()
    # keep / num_out are output buffers filled in place by the extension op.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = torch.zeros(size=(), dtype=torch.long)
    ext_module.iou3d_nms_normal_forward(boxes, keep, num_out, nms_overlap_thresh=thresh)
    # Map the kept (score-sorted) positions back to original input indices.
    return order[keep[:num_out].cuda(boxes.device)].contiguous()
class KNN(Function):
    """KNN (CUDA) based on heap data structure.

    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/pointops/src/knnquery_heap>`_.

    Find k-nearest points.
    """

    @staticmethod
    def forward(ctx, k: int, xyz: torch.Tensor, center_xyz: torch.Tensor=None, transposed: bool=False) -> torch.Tensor:
        """
        Args:
            k (int): number of nearest neighbors.
            xyz (torch.Tensor): (B, N, 3) if transposed == False, else
                (B, 3, N). xyz coordinates of the features.
            center_xyz (torch.Tensor, optional): (B, npoint, 3) if transposed
                is False, else (B, 3, npoint). centers of the knn query.
                Default: None.
            transposed (bool, optional): whether the input tensors are
                transposed. Should not explicitly use this keyword when
                calling knn (=KNN.apply), just add the fourth param.
                Default: False.

        Returns:
            torch.Tensor: (B, k, npoint) tensor with the indices of the
            features that form k-nearest neighbours.
        """
        assert ((k > 0) & (k < 100)), 'k should be in range(0, 100)'
        # Query the point cloud against itself when no centers are given.
        if (center_xyz is None):
            center_xyz = xyz
        if transposed:
            # Normalize both tensors to the channel-last (B, N, 3) layout.
            xyz = xyz.transpose(2, 1).contiguous()
            center_xyz = center_xyz.transpose(2, 1).contiguous()
        assert xyz.is_contiguous()
        assert center_xyz.is_contiguous()
        center_xyz_device = center_xyz.get_device()
        assert (center_xyz_device == xyz.get_device()), 'center_xyz and xyz should be put on the same device'
        # Make sure the kernel launches on the device holding the data.
        if (torch.cuda.current_device() != center_xyz_device):
            torch.cuda.set_device(center_xyz_device)
        (B, npoint, _) = center_xyz.shape
        N = xyz.shape[1]
        # idx / dist2 are output buffers filled by the extension kernel.
        idx = center_xyz.new_zeros((B, npoint, k)).int()
        dist2 = center_xyz.new_zeros((B, npoint, k)).float()
        ext_module.knn_forward(xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k)
        # Reorder indices to the documented (B, k, npoint) layout.
        idx = idx.transpose(2, 1).contiguous()
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Integer indices carry no gradient; nothing flows to any input.
        return (None, None, None)
class BaseMergeCell(nn.Module):
    """The basic class for cells used in NAS-FPN and NAS-FCOS.

    BaseMergeCell takes 2 inputs. After applying convolution
    on them, they are resized to the target size. Then,
    they go through binary_op, which depends on the type of cell.
    If with_out_conv is True, the result of output will go through
    another convolution layer.

    Args:
        fused_channels (int): number of input channels in out_conv layer.
        out_channels (int): number of output channels in out_conv layer.
        with_out_conv (bool): Whether to use out_conv layer
        out_conv_cfg (dict): Config dict for convolution layer, which should
            contain "groups", "kernel_size", "padding", "bias" to build
            out_conv layer.
        out_norm_cfg (dict): Config dict for normalization layer in out_conv.
        out_conv_order (tuple): The order of conv/norm/activation layers in
            out_conv.
        with_input1_conv (bool): Whether to use convolution on input1.
        with_input2_conv (bool): Whether to use convolution on input2.
        input_conv_cfg (dict): Config dict for building input1_conv layer and
            input2_conv layer, which is expected to contain the type of
            convolution.
            Default: None, which means using conv2d.
        input_norm_cfg (dict): Config dict for normalization layer in
            input1_conv and input2_conv layer. Default: None.
        upsample_mode (str): Interpolation method used to resize the output
            of input1_conv and input2_conv to target size. Currently, we
            support ['nearest', 'bilinear']. Default: 'nearest'.
    """

    def __init__(self, fused_channels=256, out_channels=256, with_out_conv=True, out_conv_cfg=dict(groups=1, kernel_size=3, padding=1, bias=True), out_norm_cfg=None, out_conv_order=('act', 'conv', 'norm'), with_input1_conv=False, with_input2_conv=False, input_conv_cfg=None, input_norm_cfg=None, upsample_mode='nearest'):
        super(BaseMergeCell, self).__init__()
        assert (upsample_mode in ['nearest', 'bilinear'])
        self.with_out_conv = with_out_conv
        self.with_input1_conv = with_input1_conv
        self.with_input2_conv = with_input2_conv
        self.upsample_mode = upsample_mode
        if self.with_out_conv:
            self.out_conv = ConvModule(fused_channels, out_channels, **out_conv_cfg, norm_cfg=out_norm_cfg, order=out_conv_order)
        # Optional per-input 3x3 convs; identity (empty Sequential) otherwise.
        self.input1_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input1_conv else nn.Sequential())
        self.input2_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input2_conv else nn.Sequential())

    def _build_input_conv(self, channel, conv_cfg, norm_cfg):
        # 3x3 conv that preserves channel count and spatial size.
        return ConvModule(channel, channel, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=True)

    @abstractmethod
    def _binary_op(self, x1, x2):
        # How the two resized inputs are fused; defined by subclasses.
        pass

    def _resize(self, x, size):
        # NOTE: the comparisons below compare (H, W) shape tuples
        # lexicographically, not element-wise.
        if (x.shape[(- 2):] == size):
            return x
        elif (x.shape[(- 2):] < size):
            # Upsample with the configured interpolation mode.
            return F.interpolate(x, size=size, mode=self.upsample_mode)
        else:
            # Downsample by max pooling; requires integer scale factors.
            assert (((x.shape[(- 2)] % size[(- 2)]) == 0) and ((x.shape[(- 1)] % size[(- 1)]) == 0))
            kernel_size = (x.shape[(- 1)] // size[(- 1)])
            x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
            return x

    def forward(self, x1, x2, out_size=None):
        # Inputs must agree on batch size and channels.
        assert (x1.shape[:2] == x2.shape[:2])
        assert ((out_size is None) or (len(out_size) == 2))
        if (out_size is None):
            # Default target size: the larger of the two spatial sizes.
            out_size = max(x1.size()[2:], x2.size()[2:])
        x1 = self.input1_conv(x1)
        x2 = self.input2_conv(x2)
        x1 = self._resize(x1, out_size)
        x2 = self._resize(x2, out_size)
        x = self._binary_op(x1, x2)
        if self.with_out_conv:
            x = self.out_conv(x)
        return x
class SumCell(BaseMergeCell):
    """Merge cell that fuses its two inputs by element-wise addition."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(SumCell, self).__init__(in_channels, out_channels, **kwargs)

    def _binary_op(self, x1, x2):
        # Element-wise sum of the two resized feature maps.
        return x1 + x2
class ConcatCell(BaseMergeCell):
    """Merge cell that fuses its two inputs by channel-wise concatenation."""

    def __init__(self, in_channels, out_channels, **kwargs):
        # The fused tensor has twice the input channels after concatenation.
        super(ConcatCell, self).__init__(in_channels * 2, out_channels, **kwargs)

    def _binary_op(self, x1, x2):
        return torch.cat([x1, x2], dim=1)
class GlobalPoolingCell(BaseMergeCell):
    """Merge cell that gates x1 with a global attention derived from x2."""

    def __init__(self, in_channels=None, out_channels=None, **kwargs):
        super().__init__(in_channels, out_channels, **kwargs)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

    def _binary_op(self, x1, x2):
        # Squeeze x2 to a per-channel gate in (0, 1), weight x1 by it, and
        # add the result back onto x2.
        attention = self.global_pool(x2).sigmoid()
        return x2 + attention * x1
def min_area_polygons(pointsets):
    """Find the smallest polygons that surrounds all points in the point sets.

    Args:
        pointsets (Tensor): point sets with shape (N, 18).

    Returns:
        torch.Tensor: Return the smallest polygons with shape (N, 8).
    """
    # Output buffer (8 values per point set), filled in place by the
    # extension op.
    polygons = pointsets.new_zeros((pointsets.size(0), 8))
    ext_module.min_area_polygons(pointsets, polygons)
    return polygons
class ModulatedDeformConv2dFunction(Function):
    """Autograd Function for modulated deformable convolution (DCNv2)."""

    @staticmethod
    def symbolic(g, input, offset, mask, weight, bias, stride, padding, dilation, groups, deform_groups):
        # ONNX export: emit a single custom MMCV op; the bias tensor is
        # appended only when present.
        input_tensors = [input, offset, mask, weight]
        if (bias is not None):
            input_tensors.append(bias)
        return g.op('mmcv::MMCVModulatedDeformConv2d', *input_tensors, stride_i=stride, padding_i=padding, dilation_i=dilation, groups_i=groups, deform_groups_i=deform_groups)

    @staticmethod
    def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deform_groups=1):
        if ((input is not None) and (input.dim() != 4)):
            raise ValueError(f'Expected 4D tensor as input, got {input.dim()}D tensor instead.')
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deform_groups = deform_groups
        ctx.with_bias = (bias is not None)
        if (not ctx.with_bias):
            # The extension kernel always expects a bias tensor; pass an
            # empty one when bias is disabled.
            bias = input.new_empty(0)
        # Unify dtypes of all tensors with the offset's dtype (e.g. AMP).
        input = input.type_as(offset)
        weight = weight.type_as(input)
        bias = bias.type_as(input)
        ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(ModulatedDeformConv2dFunction._output_size(ctx, input, weight))
        # Scratch buffers used by the extension kernel in forward/backward.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        ext_module.modulated_deform_conv_forward(input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, offset, mask, weight, bias) = ctx.saved_tensors
        # Gradient buffers filled in place by the extension kernel.
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        grad_output = grad_output.contiguous()
        ext_module.modulated_deform_conv_backward(input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias)
        if (not ctx.with_bias):
            grad_bias = None
        # One entry per forward argument; non-tensor args get None.
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None)

    @staticmethod
    def _output_size(ctx, input, weight):
        # Standard convolution output-shape arithmetic for each spatial dim.
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range((input.dim() - 2)):
            in_size = input.size((d + 2))
            pad = ctx.padding[d]
            # Effective kernel extent including dilation.
            kernel = ((ctx.dilation[d] * (weight.size((d + 2)) - 1)) + 1)
            stride_ = ctx.stride[d]
            output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
        if (not all(map((lambda s: (s > 0)), output_size))):
            raise ValueError((('convolution input is too small (output would be ' + 'x'.join(map(str, output_size))) + ')'))
        return output_size
class ModulatedDeformConv2d(nn.Module):
    """Modulated deformable 2d convolution (DCNv2) module.

    Holds the convolution weight/bias and delegates the actual computation
    to :func:`modulated_deform_conv2d`; the offset and modulation mask are
    supplied by the caller in ``forward``.
    """

    @deprecated_api_warning({'deformable_groups': 'deform_groups'}, cls_name='ModulatedDeformConv2d')
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deform_groups=1, bias=True):
        super(ModulatedDeformConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deform_groups = deform_groups
        # Attributes kept for compatibility with the _ConvNd interface.
        self.transposed = False
        self.output_padding = _single(0)
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups,
                         *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.init_weights()

    def init_weights(self):
        # Uniform init with bound 1/sqrt(fan_in), where
        # fan_in = in_channels * prod(kernel_size); bias starts at zero.
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        bound = 1.0 / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, offset, mask):
        return modulated_deform_conv2d(x, offset, mask, self.weight,
                                       self.bias, self.stride, self.padding,
                                       self.dilation, self.groups,
                                       self.deform_groups)
@CONV_LAYERS.register_module('DCNv2')
class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
    """A ModulatedDeformable Conv Encapsulation that acts as normal Conv
    layers.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int): Same as nn.Conv2d, while tuple is not supported.
        padding (int): Same as nn.Conv2d, while tuple is not supported.
        dilation (int): Same as nn.Conv2d, while tuple is not supported.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
    """

    # State-dict layout version; v1 checkpoints used different key names.
    _version = 2

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs)
        # conv_offset predicts 3 values per deform group per kernel location,
        # which forward() splits into two offset chunks and one mask chunk.
        self.conv_offset = nn.Conv2d(self.in_channels, (((self.deform_groups * 3) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation, bias=True)
        self.init_weights()

    def init_weights(self):
        super(ModulatedDeformConv2dPack, self).init_weights()
        if hasattr(self, 'conv_offset'):
            # Zero init: zero offsets and, after sigmoid, a uniform 0.5 mask,
            # so training starts close to a plain convolution.
            self.conv_offset.weight.data.zero_()
            self.conv_offset.bias.data.zero_()

    def forward(self, x):
        out = self.conv_offset(x)
        (o1, o2, mask) = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deform_groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            # Migrate legacy (v1) parameter keys `*_offset.*` to the current
            # `*conv_offset.*` names before the standard loading runs.
            if (((prefix + 'conv_offset.weight') not in state_dict) and ((prefix[:(- 1)] + '_offset.weight') in state_dict)):
                state_dict[(prefix + 'conv_offset.weight')] = state_dict.pop((prefix[:(- 1)] + '_offset.weight'))
            if (((prefix + 'conv_offset.bias') not in state_dict) and ((prefix[:(- 1)] + '_offset.bias') in state_dict)):
                state_dict[(prefix + 'conv_offset.bias')] = state_dict.pop((prefix[:(- 1)] + '_offset.bias'))
        if ((version is not None) and (version > 1)):
            print_log(f"ModulatedDeformConvPack {prefix.rstrip('.')} is upgraded to version 2.", logger='root')
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold):
    """Group pixels into text instances, which is widely used text detection
    methods.

    Arguments:
        score (np.array or torch.Tensor): The foreground score with size hxw.
        mask (np.array or Tensor): The foreground mask with size hxw.
        embedding (np.array or torch.Tensor): The embedding with size hxwxc to
            distinguish instances.
        kernel_label (np.array or torch.Tensor): The instance kernel index
            with size hxw.
        kernel_contour (np.array or torch.Tensor): The kernel contour with
            size hxw.
        kernel_region_num (int): The instance kernel region number.
        distance_threshold (float): The embedding distance threshold between
            kernel and pixel in one instance.

    Returns:
        list[list[float]]: The instance coordinates and attributes list. Each
        element consists of averaged confidence, pixel number, and coordinates
        (x_i, y_i for all pixels) in order.
    """
    assert isinstance(score, (torch.Tensor, np.ndarray))
    assert isinstance(mask, (torch.Tensor, np.ndarray))
    assert isinstance(embedding, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_contour, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_region_num, int)
    assert isinstance(distance_threshold, float)
    # The extension op works on tensors; convert numpy inputs up front.
    if isinstance(score, np.ndarray):
        score = torch.from_numpy(score)
    if isinstance(mask, np.ndarray):
        mask = torch.from_numpy(mask)
    if isinstance(embedding, np.ndarray):
        embedding = torch.from_numpy(embedding)
    if isinstance(kernel_label, np.ndarray):
        kernel_label = torch.from_numpy(kernel_label)
    if isinstance(kernel_contour, np.ndarray):
        kernel_contour = torch.from_numpy(kernel_contour)
    if torch.__version__ == 'parrots':
        label = ext_module.pixel_group(
            score,
            mask,
            embedding,
            kernel_label,
            kernel_contour,
            kernel_region_num=kernel_region_num,
            distance_threshold=distance_threshold)
        label = label.tolist()
        label = label[0]
        # The flattened result stores one pixel count per region in the first
        # kernel_region_num entries, followed by the concatenated per-region
        # data, which is unpacked region by region below.
        list_index = kernel_region_num
        pixel_assignment = []
        for x in range(kernel_region_num):
            # Fix: `np.float` was a deprecated alias of the builtin `float`
            # (i.e. float64) and was removed in NumPy >= 1.24; use np.float64
            # for identical behavior on all NumPy versions.
            pixel_assignment.append(
                np.array(
                    label[list_index:list_index + int(label[x])],
                    dtype=np.float64))
            list_index = list_index + int(label[x])
    else:
        pixel_assignment = ext_module.pixel_group(score, mask, embedding,
                                                  kernel_label, kernel_contour,
                                                  kernel_region_num,
                                                  distance_threshold)
    return pixel_assignment
def points_in_boxes_part(points, boxes):
    """Find the box in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate.
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in
            LiDAR/DEPTH coordinate, (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M). Default background = -1.
    """
    assert (points.shape[0] == boxes.shape[0]), f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert (boxes.shape[2] == 7), f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert (points.shape[2] == 3), f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    (batch_size, num_points, _) = points.shape
    # -1 marks points that fall inside no box; filled by the kernel.
    box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_((- 1))
    points_device = points.get_device()
    assert (points_device == boxes.get_device()), 'Points and boxes should be put on the same device'
    # Launch the kernel on the device that holds the tensors.
    if (torch.cuda.current_device() != points_device):
        torch.cuda.set_device(points_device)
    ext_module.points_in_boxes_part_forward(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)
    return box_idxs_of_pts
def points_in_boxes_cpu(points, boxes):
    """Find all boxes in which each point is (CPU). The CPU version of
    :meth:`points_in_boxes_all`.

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in
            LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M, T). Default background = 0.
    """
    assert (points.shape[0] == boxes.shape[0]), f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert (boxes.shape[2] == 7), f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert (points.shape[2] == 3), f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    (batch_size, num_points, _) = points.shape
    num_boxes = boxes.shape[1]
    # The kernel fills a (T, M) indicator per batch element; transposed to
    # the documented (B, M, T) layout after the loop.
    point_indices = points.new_zeros((batch_size, num_boxes, num_points), dtype=torch.int)
    for b in range(batch_size):
        ext_module.points_in_boxes_cpu_forward(boxes[b].float().contiguous(), points[b].float().contiguous(), point_indices[b])
    point_indices = point_indices.transpose(1, 2)
    return point_indices
def points_in_boxes_all(points, boxes):
    """Find all boxes in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate.
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        torch.Tensor: Return the box indices of points with the shape of
        (B, M, T). Default background = 0.
    """
    # Fix: the original error message interpolated boxes.shape[0] twice, so a
    # batch-size mismatch reported the same number for both tensors. Also
    # align the assert with points_in_boxes_part for consistency.
    assert (points.shape[0] == boxes.shape[0]), f'Points and boxes should have the same batch size, but got {points.shape[0]} and {boxes.shape[0]}'
    assert (boxes.shape[2] == 7), f'boxes dimension should be 7, but got unexpected shape {boxes.shape[2]}'
    assert (points.shape[2] == 3), f'points dimension should be 3, but got unexpected shape {points.shape[2]}'
    (batch_size, num_points, _) = points.shape
    num_boxes = boxes.shape[1]
    # Per-point, per-box membership indicator; filled by the kernel.
    box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes), dtype=torch.int).fill_(0)
    points_device = points.get_device()
    assert (points_device == boxes.get_device()), 'Points and boxes should be put on the same device'
    # Launch the kernel on the device that holds the tensors.
    if (torch.cuda.current_device() != points_device):
        torch.cuda.set_device(points_device)
    ext_module.points_in_boxes_all_forward(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)
    return box_idxs_of_pts
def points_in_polygons(points, polygons):
    """Judging whether points are inside polygons, which is used in the ATSS
    assignment for the rotated boxes.

    It should be noted that when the point is just at the polygon boundary,
    the judgment will be inaccurate, but the effect on assignment is limited.

    Args:
        points (torch.Tensor): It has shape (B, 2), indicating (x, y).
            B means the number of predicted points.
        polygons (torch.Tensor): It has shape (M, 8), indicating
            (x1, y1, x2, y2, x3, y3, x4, y4). M means the number of
            ground truth polygons.

    Returns:
        torch.Tensor: Return the result with the shape of (B, M),
        1 indicates that the point is inside the polygon,
        0 indicates that the point is outside the polygon.
    """
    assert (points.shape[1] == 2), f'points dimension should be 2, but got unexpected shape {points.shape[1]}'
    assert (polygons.shape[1] == 8), f'polygons dimension should be 8, but got unexpected shape {polygons.shape[1]}'
    # Output indicator matrix; NOTE this op is CUDA-only (the buffer is
    # explicitly moved to the current CUDA device).
    output = torch.full([points.shape[0], polygons.shape[0]], 0.0).cuda().float()
    ext_module.points_in_polygons_forward(points.contiguous(), polygons.contiguous(), output)
    return output
class PSAMaskFunction(Function):
    """Autograd Function for the point-wise spatial attention mask (PSANet)."""

    @staticmethod
    def symbolic(g, input, psa_type, mask_size):
        # ONNX export as a single custom MMCV op.
        return g.op('mmcv::MMCVPSAMask', input, psa_type_i=psa_type, mask_size_i=mask_size)

    @staticmethod
    def forward(ctx, input, psa_type, mask_size):
        ctx.psa_type = psa_type
        ctx.mask_size = _pair(mask_size)
        ctx.save_for_backward(input)
        (h_mask, w_mask) = ctx.mask_size
        (batch_size, channels, h_feature, w_feature) = input.size()
        # The input must carry one channel per mask position.
        assert (channels == (h_mask * w_mask))
        # Output has one channel per feature-map position; filled in place.
        output = input.new_zeros((batch_size, (h_feature * w_feature), h_feature, w_feature))
        ext_module.psamask_forward(input, output, psa_type=psa_type, num_=batch_size, h_feature=h_feature, w_feature=w_feature, h_mask=h_mask, w_mask=w_mask, half_h_mask=((h_mask - 1) // 2), half_w_mask=((w_mask - 1) // 2))
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input = ctx.saved_tensors[0]
        psa_type = ctx.psa_type
        (h_mask, w_mask) = ctx.mask_size
        (batch_size, channels, h_feature, w_feature) = input.size()
        # Gradient buffer in the input's shape; filled in place.
        grad_input = grad_output.new_zeros((batch_size, channels, h_feature, w_feature))
        ext_module.psamask_backward(grad_output, grad_input, psa_type=psa_type, num_=batch_size, h_feature=h_feature, w_feature=w_feature, h_mask=h_mask, w_mask=w_mask, half_h_mask=((h_mask - 1) // 2), half_w_mask=((w_mask - 1) // 2))
        # No gradients for the non-tensor psa_type / mask_size arguments.
        return (grad_input, None, None, None)
class PSAMask(nn.Module):
    """Module wrapper around the PSA mask op (PSANet).

    Args:
        psa_type (str): Either ``'collect'`` or ``'distribute'``.
        mask_size (tuple, optional): (h, w) of the attention mask.
    """

    def __init__(self, psa_type, mask_size=None):
        super(PSAMask, self).__init__()
        assert psa_type in ['collect', 'distribute']
        # The kernel expects an integer enum: 0 = collect, 1 = distribute.
        self.psa_type_enum = 0 if psa_type == 'collect' else 1
        self.mask_size = mask_size
        self.psa_type = psa_type

    def forward(self, input):
        return psa_mask(input, self.psa_type_enum, self.mask_size)

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(psa_type={self.psa_type}, '
                f'mask_size={self.mask_size})')
class RiRoIAlignRotatedFunction(Function):
    """Autograd Function for rotation-invariant rotated RoI align (ReDet)."""

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, num_samples=0, num_orientations=8, clockwise=False):
        # Accept either a single int or an (h, w) tuple for the output size.
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif is_tuple_of(out_size, int):
            assert (len(out_size) == 2)
            (out_h, out_w) = out_size
        else:
            raise TypeError(f'"out_size" should be an integer or tuple of integers, but got {out_size}')
        # Stash the non-tensor parameters for backward.
        ctx.spatial_scale = spatial_scale
        ctx.num_samples = num_samples
        ctx.num_orientations = num_orientations
        ctx.clockwise = clockwise
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        (batch_size, num_channels, _, _) = features.size()
        num_rois = rois.size(0)
        # Output buffer filled in place by the extension kernel.
        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
        ext_module.riroi_align_rotated_forward(features, rois, output, pooled_height=out_h, pooled_width=out_w, spatial_scale=spatial_scale, num_samples=num_samples, num_orientations=num_orientations, clockwise=clockwise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        num_orientations = ctx.num_orientations
        clockwise = ctx.clockwise
        num_samples = ctx.num_samples
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        (batch_size, num_channels, feature_h, feature_w) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        # Only the feature map gets a gradient; rois stay None.
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, feature_h, feature_w)
            ext_module.riroi_align_rotated_backward(grad_output.contiguous(), rois, grad_input, pooled_height=out_h, pooled_width=out_w, spatial_scale=spatial_scale, num_samples=num_samples, num_orientations=num_orientations, clockwise=clockwise)
        # One entry per forward argument; non-tensor args get None.
        return (grad_input, grad_rois, None, None, None, None, None)
class RiRoIAlignRotated(nn.Module):
    """Rotation-invariant RoI align pooling layer for rotated proposals.

    It accepts a feature map of shape (N, C, H, W) and rois with shape
    (n, 6) with each roi decoded as (batch_index, center_x, center_y,
    w, h, angle). The angle is in radian.

    The details are described in the paper `ReDet: A Rotation-equivariant
    Detector for Aerial Object Detection
    <https://arxiv.org/abs/2103.07733>`_.

    Args:
        out_size (tuple): fixed dimensional RoI output with shape (h, w).
        spatial_scale (float): scale the input boxes by this number.
        num_samples (int): number of inputs samples to take for each
            output sample. 0 to take samples densely for current models.
        num_orientations (int): number of oriented channels.
        clockwise (bool): If True, the angle in each proposal follows a
            clockwise fashion in image space, otherwise, the angle is
            counterclockwise. Default: False.
    """

    def __init__(self, out_size, spatial_scale, num_samples=0, num_orientations=8, clockwise=False):
        super(RiRoIAlignRotated, self).__init__()
        self.out_size = out_size
        # Normalize numeric parameters to the exact types the kernel expects.
        self.spatial_scale = float(spatial_scale)
        self.num_samples = int(num_samples)
        self.num_orientations = int(num_orientations)
        self.clockwise = clockwise

    def forward(self, features, rois):
        return RiRoIAlignRotatedFunction.apply(features, rois, self.out_size,
                                               self.spatial_scale,
                                               self.num_samples,
                                               self.num_orientations,
                                               self.clockwise)
class RoIAlignFunction(Function):
    """Autograd Function for RoI align pooling."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, pool_mode, aligned):
        from ..onnx import is_custom_op_loaded
        has_custom_op = is_custom_op_loaded()
        if has_custom_op:
            # The custom MMCV op keeps the full semantics (pool_mode,
            # aligned) in one node.
            return g.op('mmcv::MMCVRoiAlign', input, rois, output_height_i=output_size[0], output_width_i=output_size[1], spatial_scale_f=spatial_scale, sampling_ratio_i=sampling_ratio, mode_s=pool_mode, aligned_i=aligned)
        else:
            # Fall back to the standard ONNX RoiAlign: split rois into batch
            # indices and coordinates, and emulate `aligned` by shifting the
            # boxes half a pixel (in input coordinates) before the op.
            from torch.onnx import TensorProtoDataType
            from torch.onnx.symbolic_helper import _slice_helper
            from torch.onnx.symbolic_opset9 import squeeze, sub
            # Column 0 of rois is the batch index; columns 1-4 are xyxy.
            batch_indices = _slice_helper(g, rois, axes=[1], starts=[0], ends=[1])
            batch_indices = squeeze(g, batch_indices, 1)
            batch_indices = g.op('Cast', batch_indices, to_i=TensorProtoDataType.INT64)
            rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
            if aligned:
                aligned_offset = g.op('Constant', value_t=torch.tensor([(0.5 / spatial_scale)], dtype=torch.float32))
                rois = sub(g, rois, aligned_offset)
            # ONNX RoiAlign does not accept a negative sampling ratio.
            return g.op('RoiAlign', input, rois, batch_indices, output_height_i=output_size[0], output_width_i=output_size[1], spatial_scale_f=spatial_scale, sampling_ratio_i=max(0, sampling_ratio), mode_s=pool_mode)

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale=1.0, sampling_ratio=0, pool_mode='avg', aligned=True):
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        assert (pool_mode in ('max', 'avg'))
        # Encode the pool mode for the kernel: 0 = max, 1 = avg.
        ctx.pool_mode = (0 if (pool_mode == 'max') else 1)
        ctx.aligned = aligned
        ctx.input_shape = input.size()
        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1])
        output = input.new_zeros(output_shape)
        if (ctx.pool_mode == 0):
            # Max pooling needs the argmax sample coordinates for backward.
            argmax_y = input.new_zeros(output_shape)
            argmax_x = input.new_zeros(output_shape)
        else:
            argmax_y = input.new_zeros(0)
            argmax_x = input.new_zeros(0)
        ext_module.roi_align_forward(input, rois, output, argmax_y, argmax_x, aligned_height=ctx.output_size[0], aligned_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, pool_mode=ctx.pool_mode, aligned=ctx.aligned)
        ctx.save_for_backward(rois, argmax_y, argmax_x)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (rois, argmax_y, argmax_x) = ctx.saved_tensors
        # Gradient buffer in the input feature map's shape, filled in place.
        grad_input = grad_output.new_zeros(ctx.input_shape)
        grad_output = grad_output.contiguous()
        ext_module.roi_align_backward(grad_output, rois, argmax_y, argmax_x, grad_input, aligned_height=ctx.output_size[0], aligned_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, pool_mode=ctx.pool_mode, aligned=ctx.aligned)
        # Gradients only flow to the input feature map.
        return (grad_input, None, None, None, None, None, None)
class RoIAlign(nn.Module): "RoI align pooling layer.\n\n Args:\n output_size (tuple): h, w\n spatial_scale (float): scale the input boxes by this number\n sampling_ratio (int): number of inputs samples to take for each\n output sample. 0 to take samples densely for current models.\n pool_mode (str, 'avg' or 'max'): pooling mode in each bin.\n aligned (bool): if False, use the legacy implementation in\n MMDetection. If True, align the results more perfectly.\n use_torchvision (bool): whether to use roi_align from torchvision.\n\n Note:\n The implementation of RoIAlign when aligned=True is modified from\n https://github.com/facebookresearch/detectron2/\n\n The meaning of aligned=True:\n\n Given a continuous coordinate c, its two neighboring pixel\n indices (in our pixel model) are computed by floor(c - 0.5) and\n ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete\n indices [0] and [1] (which are sampled from the underlying signal\n at continuous coordinates 0.5 and 1.5). But the original roi_align\n (aligned=False) does not subtract the 0.5 when computing\n neighboring pixel indices and therefore it uses pixels with a\n slightly incorrect alignment (relative to our pixel model) when\n performing bilinear interpolation.\n\n With `aligned=True`,\n we first appropriately scale the ROI and then shift it by -0.5\n prior to calling roi_align. 
This produces the correct neighbors;\n\n The difference does not make a difference to the model's\n performance if ROIAlign is used together with conv layers.\n " @deprecated_api_warning({'out_size': 'output_size', 'sample_num': 'sampling_ratio'}, cls_name='RoIAlign') def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0, pool_mode='avg', aligned=True, use_torchvision=False): super(RoIAlign, self).__init__() self.output_size = _pair(output_size) self.spatial_scale = float(spatial_scale) self.sampling_ratio = int(sampling_ratio) self.pool_mode = pool_mode self.aligned = aligned self.use_torchvision = use_torchvision def forward(self, input, rois): '\n Args:\n input: NCHW images\n rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.\n ' if self.use_torchvision: from torchvision.ops import roi_align as tv_roi_align if ('aligned' in tv_roi_align.__code__.co_varnames): return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) else: if self.aligned: rois -= rois.new_tensor(([0.0] + ([(0.5 / self.spatial_scale)] * 4))) return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio) else: return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.pool_mode, self.aligned) def __repr__(self): s = self.__class__.__name__ s += f'(output_size={self.output_size}, ' s += f'spatial_scale={self.spatial_scale}, ' s += f'sampling_ratio={self.sampling_ratio}, ' s += f'pool_mode={self.pool_mode}, ' s += f'aligned={self.aligned}, ' s += f'use_torchvision={self.use_torchvision})' return s
class RoIAlignRotatedFunction(Function):

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
                 aligned, clockwise):
        """Export RoIAlignRotated as the custom ``mmcv::MMCVRoIAlignRotated``
        ONNX op.

        ``output_size`` may be a single int (square output) or an
        ``(h, w)`` tuple of ints.
        """
        if isinstance(output_size, int):
            out_h = output_size
            out_w = output_size
        elif isinstance(output_size, tuple):
            assert (len(output_size) == 2)
            assert isinstance(output_size[0], int)
            assert isinstance(output_size[1], int)
            (out_h, out_w) = output_size
        else:
            raise TypeError(
                '"output_size" must be an integer or tuple of integers')
        # BUGFIX: output_width_i previously passed out_h, so rectangular
        # output_size tuples exported with a square (h, h) output shape.
        return g.op(
            'mmcv::MMCVRoIAlignRotated',
            input,
            rois,
            output_height_i=out_h,
            output_width_i=out_w,
            spatial_scale_f=spatial_scale,
            sampling_ratio_i=sampling_ratio,
            aligned_i=aligned,
            clockwise_i=clockwise)

    @staticmethod
    def forward(ctx,
                input,
                rois,
                output_size,
                spatial_scale,
                sampling_ratio=0,
                aligned=True,
                clockwise=False):
        """Pool rotated RoIs from ``input`` via the C extension.

        ``rois`` is (n, 6): (batch_idx, cx, cy, w, h, angle) with the angle
        in radian. Returns (n, C, out_h, out_w).
        """
        # output_size may be an int or an (h, w) pair; normalize to a pair
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.aligned = aligned
        ctx.clockwise = clockwise
        ctx.save_for_backward(rois)
        # input shape is needed to size grad_input in backward
        ctx.feature_size = input.size()
        num_rois = rois.size(0)
        num_channels = input.size(1)
        output = input.new_zeros(num_rois, num_channels, ctx.output_size[0],
                                 ctx.output_size[1])
        ext_module.roi_align_rotated_forward(
            input,
            rois,
            output,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            aligned=ctx.aligned,
            clockwise=ctx.clockwise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height,
                                        data_width)
            ext_module.roi_align_rotated_backward(
                grad_output.contiguous(),
                rois,
                grad_input,
                pooled_height=out_h,
                pooled_width=out_w,
                spatial_scale=ctx.spatial_scale,
                sampling_ratio=ctx.sampling_ratio,
                aligned=ctx.aligned,
                clockwise=ctx.clockwise)
        # one entry per forward argument; only `input` is differentiable
        return (grad_input, grad_rois, None, None, None, None, None)
class RoIAlignRotated(nn.Module): "RoI align pooling layer for rotated proposals.\n\n It accepts a feature map of shape (N, C, H, W) and rois with shape\n (n, 6) with each roi decoded as (batch_index, center_x, center_y,\n w, h, angle). The angle is in radian.\n\n Args:\n output_size (tuple): h, w\n spatial_scale (float): scale the input boxes by this number\n sampling_ratio(int): number of inputs samples to take for each\n output sample. 0 to take samples densely for current models.\n aligned (bool): if False, use the legacy implementation in\n MMDetection. If True, align the results more perfectly.\n Default: True.\n clockwise (bool): If True, the angle in each proposal follows a\n clockwise fashion in image space, otherwise, the angle is\n counterclockwise. Default: False.\n\n Note:\n The implementation of RoIAlign when aligned=True is modified from\n https://github.com/facebookresearch/detectron2/\n\n The meaning of aligned=True:\n\n Given a continuous coordinate c, its two neighboring pixel\n indices (in our pixel model) are computed by floor(c - 0.5) and\n ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete\n indices [0] and [1] (which are sampled from the underlying signal\n at continuous coordinates 0.5 and 1.5). But the original roi_align\n (aligned=False) does not subtract the 0.5 when computing\n neighboring pixel indices and therefore it uses pixels with a\n slightly incorrect alignment (relative to our pixel model) when\n performing bilinear interpolation.\n\n With `aligned=True`,\n we first appropriately scale the ROI and then shift it by -0.5\n prior to calling roi_align. 
This produces the correct neighbors;\n\n The difference does not make a difference to the model's\n performance if ROIAlign is used together with conv layers.\n " @deprecated_api_warning({'out_size': 'output_size', 'sample_num': 'sampling_ratio'}, cls_name='RoIAlignRotated') def __init__(self, output_size, spatial_scale, sampling_ratio=0, aligned=True, clockwise=False): super(RoIAlignRotated, self).__init__() self.output_size = _pair(output_size) self.spatial_scale = float(spatial_scale) self.sampling_ratio = int(sampling_ratio) self.aligned = aligned self.clockwise = clockwise def forward(self, input, rois): return RoIAlignRotatedFunction.apply(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned, self.clockwise) def __repr__(self): s = self.__class__.__name__ s += f'(output_size={self.output_size}, ' s += f'spatial_scale={self.spatial_scale}, ' s += f'sampling_ratio={self.sampling_ratio}, ' s += f'aligned={self.aligned}, ' s += f'clockwise={self.clockwise})' return s
class RoIPoolFunction(Function): @staticmethod def symbolic(g, input, rois, output_size, spatial_scale): return g.op('MaxRoiPool', input, rois, pooled_shape_i=output_size, spatial_scale_f=spatial_scale) @staticmethod def forward(ctx, input, rois, output_size, spatial_scale=1.0): ctx.output_size = _pair(output_size) ctx.spatial_scale = spatial_scale ctx.input_shape = input.size() assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!' output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1]) output = input.new_zeros(output_shape) argmax = input.new_zeros(output_shape, dtype=torch.int) ext_module.roi_pool_forward(input, rois, output, argmax, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale) ctx.save_for_backward(rois, argmax) return output @staticmethod @once_differentiable def backward(ctx, grad_output): (rois, argmax) = ctx.saved_tensors grad_input = grad_output.new_zeros(ctx.input_shape) ext_module.roi_pool_backward(grad_output, rois, argmax, grad_input, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale) return (grad_input, None, None, None)
class RoIPool(nn.Module): def __init__(self, output_size, spatial_scale=1.0): super(RoIPool, self).__init__() self.output_size = _pair(output_size) self.spatial_scale = float(spatial_scale) def forward(self, input, rois): return roi_pool(input, rois, self.output_size, self.spatial_scale) def __repr__(self): s = self.__class__.__name__ s += f'(output_size={self.output_size}, ' s += f'spatial_scale={self.spatial_scale})' return s
class RoIAwarePool3d(nn.Module): "Encode the geometry-specific features of each 3D proposal.\n\n Please refer to `PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_ for more\n details.\n\n Args:\n out_size (int or tuple): The size of output features. n or\n [n1, n2, n3].\n max_pts_per_voxel (int, optional): The maximum number of points per\n voxel. Default: 128.\n mode (str, optional): Pooling method of RoIAware, 'max' or 'avg'.\n Default: 'max'.\n " def __init__(self, out_size, max_pts_per_voxel=128, mode='max'): super().__init__() self.out_size = out_size self.max_pts_per_voxel = max_pts_per_voxel assert (mode in ['max', 'avg']) pool_mapping = {'max': 0, 'avg': 1} self.mode = pool_mapping[mode] def forward(self, rois, pts, pts_feature): '\n Args:\n rois (torch.Tensor): [N, 7], in LiDAR coordinate,\n (x, y, z) is the bottom center of rois.\n pts (torch.Tensor): [npoints, 3], coordinates of input points.\n pts_feature (torch.Tensor): [npoints, C], features of input points.\n\n Returns:\n torch.Tensor: Pooled features whose shape is\n [N, out_x, out_y, out_z, C].\n ' return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, self.out_size, self.max_pts_per_voxel, self.mode)
class RoIAwarePool3dFunction(Function): @staticmethod def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, mode): '\n Args:\n rois (torch.Tensor): [N, 7], in LiDAR coordinate,\n (x, y, z) is the bottom center of rois.\n pts (torch.Tensor): [npoints, 3], coordinates of input points.\n pts_feature (torch.Tensor): [npoints, C], features of input points.\n out_size (int or tuple): The size of output features. n or\n [n1, n2, n3].\n max_pts_per_voxel (int): The maximum number of points per voxel.\n Default: 128.\n mode (int): Pooling method of RoIAware, 0 (max pool) or 1 (average\n pool).\n\n Returns:\n torch.Tensor: Pooled features whose shape is\n [N, out_x, out_y, out_z, C].\n ' if isinstance(out_size, int): out_x = out_y = out_z = out_size else: assert (len(out_size) == 3) assert mmcv.is_tuple_of(out_size, int) (out_x, out_y, out_z) = out_size num_rois = rois.shape[0] num_channels = pts_feature.shape[(- 1)] num_pts = pts.shape[0] pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels)) argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int) pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_per_voxel), dtype=torch.int) ext_module.roiaware_pool3d_forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method=mode) ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode, num_pts, num_channels) return pooled_features @staticmethod def backward(ctx, grad_out): ret = ctx.roiaware_pool3d_for_backward (pts_idx_of_voxels, argmax, mode, num_pts, num_channels) = ret grad_in = grad_out.new_zeros((num_pts, num_channels)) ext_module.roiaware_pool3d_backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method=mode) return (None, None, grad_in, None, None, None)
class RoIPointPool3d(nn.Module): 'Encode the geometry-specific features of each 3D proposal.\n\n Please refer to `Paper of PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_\n for more details.\n\n Args:\n num_sampled_points (int, optional): Number of samples in each roi.\n Default: 512.\n ' def __init__(self, num_sampled_points=512): super().__init__() self.num_sampled_points = num_sampled_points def forward(self, points, point_features, boxes3d): '\n Args:\n points (torch.Tensor): Input points whose shape is (B, N, C).\n point_features (torch.Tensor): Features of input points whose shape\n is (B, N, C).\n boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7).\n\n Returns:\n tuple[torch.Tensor]: A tuple contains two elements. The first one\n is the pooled features whose shape is (B, M, 512, 3 + C). The\n second is an empty flag whose shape is (B, M).\n ' return RoIPointPool3dFunction.apply(points, point_features, boxes3d, self.num_sampled_points)
class RoIPointPool3dFunction(Function): @staticmethod def forward(ctx, points, point_features, boxes3d, num_sampled_points=512): '\n Args:\n points (torch.Tensor): Input points whose shape is (B, N, C).\n point_features (torch.Tensor): Features of input points whose shape\n is (B, N, C).\n boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7).\n num_sampled_points (int, optional): The num of sampled points.\n Default: 512.\n\n Returns:\n tuple[torch.Tensor]: A tuple contains two elements. The first one\n is the pooled features whose shape is (B, M, 512, 3 + C). The\n second is an empty flag whose shape is (B, M).\n ' assert ((len(points.shape) == 3) and (points.shape[2] == 3)) (batch_size, boxes_num, feature_len) = (points.shape[0], boxes3d.shape[1], point_features.shape[2]) pooled_boxes3d = boxes3d.view(batch_size, (- 1), 7) pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, (3 + feature_len))) pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int() ext_module.roipoint_pool3d_forward(points.contiguous(), pooled_boxes3d.contiguous(), point_features.contiguous(), pooled_features, pooled_empty_flag) return (pooled_features, pooled_empty_flag) @staticmethod def backward(ctx, grad_out): raise NotImplementedError
class RotatedFeatureAlignFunction(Function): 'Using the feature interpolation to obtain the position information\n correspond to the refined rotate anchors and reconstruct the feature maps\n in pixel-wise manner to achieve feature alignment.\n\n The details are described in the paper\n `R3Det: Refined Single-Stage Detector with Feature Refinement for Rotating\n Object <https://arxiv.org/abs/1908.05612>`_.\n ' @staticmethod def forward(ctx, features, best_rbboxes, spatial_scale, points): '\n Args:\n features (torch.Tensor): Input features with shape [N,C,H,W].\n best_rbboxes (torch.Tensor): Refined rotate anchors with\n shape [N,H,W,5]. Coordinate format (cx,cx,h,w,a).\n spatial_scale (float): The scale of feature map size and\n input image size.\n points (int, optional): The number of sample points.\n Only 1 and 5 are supported. Defaults to 1.\n\n Returns:\n torch.Tensor: Refined features with shape [N,C,H,W].\n ' ctx.spatial_scale = spatial_scale ctx.points = points ctx.save_for_backward(best_rbboxes) assert (points in [1, 5]) output = torch.zeros_like(features) ext_module.rotated_feature_align_forward(features, best_rbboxes, output, spatial_scale=spatial_scale, points=points) return output @staticmethod @once_differentiable def backward(ctx, grad_output): '\n Args:\n grad_output (torch.Tensor): The gradiant of output features\n with shape [N,C,H,W].\n\n Returns:\n torch.Tensor: The gradiant of input features with shape [N,C,H,W].\n ' best_rbboxes = ctx.saved_tensors[0] points = ctx.points spatial_scale = ctx.spatial_scale grad_input = None if ctx.needs_input_grad[0]: grad_input = torch.zeros_like(grad_output) ext_module.rotated_feature_align_backward(grad_output.contiguous(), best_rbboxes, grad_input, spatial_scale=spatial_scale, points=points) return (grad_input, None, None, None)