code
stringlengths
17
6.64M
def set_default(obj):
    """JSON ``default`` hook for values :mod:`json` cannot serialize.

    ``set``/``range``/``np.ndarray`` become lists; ``np.generic`` scalars
    (``np.int32``, ``np.float32``, ...) become plain Python numbers.

    Raises:
        TypeError: If ``obj`` is of any other non-serializable type.
    """
    if isinstance(obj, (set, range)):
        return list(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.generic):
        return obj.item()
    raise TypeError(f'{type(obj)} is unsupported for json dump')
class JsonHandler(BaseFileHandler):
    """File handler backed by the :mod:`json` module."""

    def load_from_fileobj(self, file):
        """Deserialize JSON from an open file object."""
        return json.load(file)

    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize ``obj`` as JSON into ``file``; falls back to
        :func:`set_default` for non-serializable values."""
        kwargs.setdefault('default', set_default)
        json.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` to a JSON string."""
        kwargs.setdefault('default', set_default)
        return json.dumps(obj, **kwargs)
class PickleHandler(BaseFileHandler):
    """File handler backed by :mod:`pickle`; operates on bytes, not text."""

    str_like = False  # pickle payloads are binary

    def load_from_fileobj(self, file, **kwargs):
        """Deserialize a pickle from an open binary file object."""
        return pickle.load(file, **kwargs)

    def load_from_path(self, filepath, **kwargs):
        """Load from a path, forcing binary read mode."""
        return super().load_from_path(filepath, mode='rb', **kwargs)

    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` to pickle bytes (protocol 2 by default)."""
        kwargs.setdefault('protocol', 2)
        return pickle.dumps(obj, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize ``obj`` into an open binary file object."""
        kwargs.setdefault('protocol', 2)
        pickle.dump(obj, file, **kwargs)

    def dump_to_path(self, obj, filepath, **kwargs):
        """Dump to a path, forcing binary write mode."""
        super().dump_to_path(obj, filepath, mode='wb', **kwargs)
class YamlHandler(BaseFileHandler):
    """File handler backed by :mod:`yaml` using the module-level
    ``Loader``/``Dumper`` (presumably the C-accelerated ones when
    available — defined elsewhere in this file)."""

    def load_from_fileobj(self, file, **kwargs):
        """Deserialize YAML from an open file object."""
        kwargs.setdefault('Loader', Loader)
        return yaml.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize ``obj`` as YAML into ``file``."""
        kwargs.setdefault('Dumper', Dumper)
        yaml.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` to a YAML string."""
        kwargs.setdefault('Dumper', Dumper)
        return yaml.dump(obj, **kwargs)
def load(file, file_format=None, file_client_args=None, **kwargs):
    """Load data from json/yaml/pickle files via a unified API.

    Since v1.3.16, serialized files stored on different backends (disk,
    HTTP, petrel, ...) are supported through :class:`FileClient`.

    Args:
        file (str or :obj:`Path` or file-like object): Filename or a
            file-like object.
        file_format (str, optional): Inferred from the file extension when
            not given. Supported: "json", "yaml/yml", "pickle/pkl".
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient`. Default: None.

    Examples:
        >>> load('/path/of/your/file')  # file is storaged in disk
        >>> load('https://path/of/your/file')  # file is storaged in Internet
        >>> load('s3://path/of/your/file')  # file is storaged in petrel

    Returns:
        The content from the file.

    Raises:
        TypeError: If the format is unsupported or ``file`` is neither a
            path string nor a readable object.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')

    handler = file_handlers[file_format]
    if is_str(file):
        # Fetch the whole payload through the backend, then parse it from
        # an in-memory buffer (text or bytes depending on the handler).
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            with StringIO(file_client.get_text(file)) as f:
                return handler.load_from_fileobj(f, **kwargs)
        with BytesIO(file_client.get(file)) as f:
            return handler.load_from_fileobj(f, **kwargs)
    if hasattr(file, 'read'):
        return handler.load_from_fileobj(file, **kwargs)
    raise TypeError('"file" must be a filepath str or a file-object')
def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.

    Since v1.3.16, data can be dumped to files saved on different backends
    through :class:`FileClient`.

    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): When None,
            the object is dumped to a string and returned; otherwise it is
            written to the given path or file-like object.
        file_format (str, optional): Same as :func:`load`. Required when
            ``file`` is None.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient`. Default: None.

    Examples:
        >>> dump('hello world', '/path/of/your/file')  # disk
        >>> dump('hello world', 's3://path/of/your/file')  # ceph or petrel

    Returns:
        str or None: The dumped string when ``file`` is None, otherwise
        None (the data is written out as a side effect).
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError('file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')

    handler = file_handlers[file_format]
    if file is None:
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        # Serialize into an in-memory buffer, then push through the backend.
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            with StringIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put_text(f.getvalue(), file)
        else:
            with BytesIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put(f.getvalue(), file)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def _register_handler(handler, file_formats):
    """Register a handler instance for one or more file extensions.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): File formats handled by it.

    Raises:
        TypeError: If ``handler`` is not a BaseFileHandler or
            ``file_formats`` is not a str / list of str.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            f'handler must be a child of BaseFileHandler, not {type(handler)}')
    if isinstance(file_formats, str):
        file_formats = [file_formats]
    if not is_list_of(file_formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    for fmt in file_formats:
        file_handlers[fmt] = handler
def register_handler(file_formats, **kwargs):
    """Class decorator: instantiate the handler class with ``kwargs`` and
    register it for ``file_formats``."""

    def wrap(cls):
        _register_handler(cls(**kwargs), file_formats)
        return cls

    return wrap
def list_from_file(filename, prefix='', offset=0, max_num=0, encoding='utf-8',
                   file_client_args=None):
    """Load a text file and parse its content as a list of strings.

    Since v1.3.16, the file may live on any :class:`FileClient` backend.

    Args:
        filename (str): Filename.
        prefix (str): Prefix inserted at the beginning of each item.
        offset (int): Number of leading lines to skip.
        max_num (int): Maximum number of lines to read; zero or negative
            means no limit.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient`. Default: None.

    Examples:
        >>> list_from_file('/path/of/your/file')  # disk
        ['hello', 'world']
        >>> list_from_file('s3://path/of/your/file')  # ceph or petrel
        ['hello', 'world']

    Returns:
        list[str]: A list of strings.
    """
    collected = []
    file_client = FileClient.infer_client(file_client_args, filename)
    with StringIO(file_client.get_text(filename, encoding)) as f:
        for _ in range(offset):
            f.readline()
        for line in f:
            # len(collected) doubles as the running line counter.
            if 0 < max_num <= len(collected):
                break
            collected.append(prefix + line.rstrip('\n\r'))
    return collected
def dict_from_file(filename, key_type=str, encoding='utf-8',
                   file_client_args=None):
    """Load a text file and parse its content as a dict.

    Each line must have two or more whitespace/tab-separated columns: the
    first is the key, the rest the value (a single string when there is one
    value column, a list of strings otherwise). Lines with fewer than two
    columns trip the ``assert`` below (AssertionError).

    Since v1.3.16, the file may live on any :class:`FileClient` backend.

    Args:
        filename (str): Filename.
        key_type (type): Type the keys are converted to. Default: str.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient`. Default: None.

    Examples:
        >>> dict_from_file('/path/of/your/file')  # disk
        {'key1': 'value1', 'key2': 'value2'}
        >>> dict_from_file('s3://path/of/your/file')  # ceph or petrel
        {'key1': 'value1', 'key2': 'value2'}

    Returns:
        dict: The parsed contents.
    """
    mapping = {}
    file_client = FileClient.infer_client(file_client_args, filename)
    with StringIO(file_client.get_text(filename, encoding)) as f:
        for line in f:
            columns = line.rstrip('\n').split()
            assert len(columns) >= 2
            value = columns[1:] if len(columns) > 2 else columns[1]
            mapping[key_type(columns[0])] = value
    return mapping
def imconvert(img, src, dst):
    """Convert an image from the ``src`` colorspace to the ``dst`` colorspace.

    Args:
        img (ndarray): The input image.
        src (str): The source colorspace, e.g., 'rgb', 'hsv'.
        dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.

    Returns:
        ndarray: The converted image.
    """
    conversion_code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
    return cv2.cvtColor(img, conversion_code)
def bgr2gray(img, keepdim=False):
    """Convert a BGR image to a grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (default), return the grayscale image with
            2 dims, otherwise 3 dims (trailing channel dim kept).

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray[..., None] if keepdim else gray
def rgb2gray(img, keepdim=False):
    """Convert a RGB image to a grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (default), return the grayscale image with
            2 dims, otherwise 3 dims (trailing channel dim kept).

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray[..., None] if keepdim else gray
def gray2bgr(img):
    """Convert a grayscale image to a BGR image.

    Args:
        img (ndarray): The input image (2-dim, or 3-dim with one channel).

    Returns:
        ndarray: The converted BGR image.
    """
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
def gray2rgb(img):
    """Convert a grayscale image to a RGB image.

    Args:
        img (ndarray): The input image (2-dim, or 3-dim with one channel).

    Returns:
        ndarray: The converted RGB image.
    """
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
def _convert_input_type_range(img): 'Convert the type and range of the input image.\n\n It converts the input image to np.float32 type and range of [0, 1].\n It is mainly used for pre-processing the input image in colorspace\n conversion functions such as rgb2ycbcr and ycbcr2rgb.\n\n Args:\n img (ndarray): The input image. It accepts:\n 1. np.uint8 type with range [0, 255];\n 2. np.float32 type with range [0, 1].\n\n Returns:\n (ndarray): The converted image with type of np.float32 and range of\n [0, 1].\n ' img_type = img.dtype img = img.astype(np.float32) if (img_type == np.float32): pass elif (img_type == np.uint8): img /= 255.0 else: raise TypeError(f'The img type should be np.float32 or np.uint8, but got {img_type}') return img
def _convert_output_type_range(img, dst_type): 'Convert the type and range of the image according to dst_type.\n\n It converts the image to desired type and range. If `dst_type` is np.uint8,\n images will be converted to np.uint8 type with range [0, 255]. If\n `dst_type` is np.float32, it converts the image to np.float32 type with\n range [0, 1].\n It is mainly used for post-processing images in colorspace conversion\n functions such as rgb2ycbcr and ycbcr2rgb.\n\n Args:\n img (ndarray): The image to be converted with np.float32 type and\n range [0, 255].\n dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it\n converts the image to np.uint8 type with range [0, 255]. If\n dst_type is np.float32, it converts the image to np.float32 type\n with range [0, 1].\n\n Returns:\n (ndarray): The converted image with desired type and range.\n ' if (dst_type not in (np.uint8, np.float32)): raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}') if (dst_type == np.uint8): img = img.round() else: img /= 255.0 return img.astype(dst_type)
def rgb2ycbcr(img, y_only=False):
    """Convert a RGB image to YCbCr (matches Matlab's ``rgb2ycbcr``).

    Implements the ITU-R BT.601 conversion for standard-definition TV,
    unlike ``cv2.cvtColor``'s JPEG-style `RGB <-> YCrCb`. See
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].
        y_only (bool): Whether to only return the Y channel. Default: False.

    Returns:
        ndarray: YCbCr image with the same type and range as the input.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
    else:
        out_img = np.matmul(
            img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                  [24.966, 112.0, -18.214]]) + [16, 128, 128]
    return _convert_output_type_range(out_img, img_type)
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr (the BGR twin of :func:`rgb2ycbcr`).

    Implements the ITU-R BT.601 conversion for standard-definition TV,
    unlike ``cv2.cvtColor``'s JPEG-style `BGR <-> YCrCb`. See
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].
        y_only (bool): Whether to only return the Y channel. Default: False.

    Returns:
        ndarray: YCbCr image with the same type and range as the input.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
    else:
        out_img = np.matmul(
            img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                  [65.481, -37.797, 112.0]]) + [16, 128, 128]
    return _convert_output_type_range(out_img, img_type)
def ycbcr2rgb(img):
    """Convert a YCbCr image to RGB (matches Matlab's ``ycbcr2rgb``).

    Implements the ITU-R BT.601 conversion for standard-definition TV,
    unlike ``cv2.cvtColor``'s JPEG-style `YCrCb <-> RGB`. See
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].

    Returns:
        ndarray: RGB image with the same type and range as the input.
    """
    img_type = img.dtype
    # Work in the [0, 255] domain expected by the inverse matrix.
    img = _convert_input_type_range(img) * 255
    out_img = np.matmul(
        img, [[0.00456621, 0.00456621, 0.00456621],
              [0, -0.00153632, 0.00791071],
              [0.00625893, -0.00318811, 0]]) * 255.0 + [
                  -222.921, 135.576, -276.836]
    return _convert_output_type_range(out_img, img_type)
def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR (the BGR twin of :func:`ycbcr2rgb`).

    Implements the ITU-R BT.601 conversion for standard-definition TV,
    unlike ``cv2.cvtColor``'s JPEG-style `YCrCb <-> BGR`. See
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].

    Returns:
        ndarray: BGR image with the same type and range as the input.
    """
    img_type = img.dtype
    # Work in the [0, 255] domain expected by the inverse matrix.
    img = _convert_input_type_range(img) * 255
    out_img = np.matmul(
        img, [[0.00456621, 0.00456621, 0.00456621],
              [0.00791071, -0.00153632, 0],
              [0, -0.00318811, 0.00625893]]) * 255.0 + [
                  -276.836, 135.576, -222.921]
    return _convert_output_type_range(out_img, img_type)
def convert_color_factory(src, dst):
    """Build a ``src`` -> ``dst`` colorspace conversion function.

    Args:
        src (str): Source colorspace name, e.g. 'rgb'.
        dst (str): Destination colorspace name, e.g. 'hsv'.

    Returns:
        callable: A function mapping an image to the ``dst`` colorspace.
    """
    code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')

    def convert_color(img):
        return cv2.cvtColor(img, code)

    convert_color.__doc__ = f'''Convert a {src.upper()} image to {dst.upper()}
        image.

    Args:
        img (ndarray or str): The input image.

    Returns:
        ndarray: The converted {dst.upper()} image.
    '''
    return convert_color
def tensor2imgs(tensor, mean=None, std=None, to_rgb=True):
    """Convert a batched tensor to 3-channel or 1-channel gray images.

    Args:
        tensor (torch.Tensor): Tensor of shape (N, C, H, W), C in {1, 3}.
        mean (tuple[float], optional): Mean of images. Defaults to zeros of
            the matching channel count when None.
        std (tuple[float], optional): Standard deviation of images. Defaults
            to ones of the matching channel count when None.
        to_rgb (bool, optional): Whether the tensor was converted to RGB in
            the first place; if so it is converted back to BGR. Must be
            False for 1-channel tensors. Defaults to True.

    Returns:
        list[np.ndarray]: One contiguous HxWxC uint8 image per batch entry.
    """
    if torch is None:
        raise RuntimeError('pytorch is not installed')
    assert torch.is_tensor(tensor) and tensor.ndim == 4
    channels = tensor.size(1)
    assert channels in [1, 3]
    if mean is None:
        mean = (0,) * channels
    if std is None:
        std = (1,) * channels
    # 1-channel images cannot be RGB, hence the (not to_rgb) requirement.
    assert (channels == len(mean) == len(std) == 3) or \
        (channels == len(mean) == len(std) == 1 and not to_rgb)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for single in tensor:
        hwc = single.cpu().numpy().transpose(1, 2, 0)
        hwc = mmcv.imdenormalize(
            hwc, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(hwc))
    return imgs
def is_custom_op_loaded():
    """Check whether custom TensorRT/ONNXRuntime ops are available.

    Deprecated in favor of MMDeploy; emits a styled DeprecationWarning
    message on every call.

    Returns:
        bool: True when a TensorRT plugin or ONNXRuntime op library is
        found, or when running under parrots.
    """
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    loaded = False
    try:
        from ..tensorrt import is_tensorrt_plugin_loaded
        loaded = is_tensorrt_plugin_loaded()
    except (ImportError, ModuleNotFoundError):
        pass
    if not loaded:
        # Fall back to the ONNXRuntime custom-op library.
        try:
            from ..ops import get_onnxruntime_op_path
            loaded = os.path.exists(get_onnxruntime_op_path())
        except (ImportError, ModuleNotFoundError):
            pass
    return loaded or torch.__version__ == 'parrots'
def _parse_arg(value, desc):
    # Convert a TorchScript ``Value`` into a plain Python object according to
    # the descriptor ``desc``: 'none' and 'v' pass through, 'i'/'f'/'b'/'s'
    # coerce a constant to int/float/bool/str, 't' returns the raw tensor,
    # 'is'/'fs' return lists of ints/floats.
    if (desc == 'none'):
        return value
    if ((desc == 'v') or (not _is_value(value))):
        # Caller wants the raw value, or it is already a plain Python object.
        return value
    if value.node().mustBeNone():
        return None
    if (value.node().kind() == 'onnx::Constant'):
        tval = value.node()['value']
        if (desc == 'i'):
            return int(tval)
        elif (desc == 'f'):
            return float(tval)
        elif (desc == 'b'):
            return bool(tval)
        elif (desc == 's'):
            return str(tval)
        elif (desc == 't'):
            return tval
        elif (desc == 'is'):
            return [int(v) for v in tval]
        elif (desc == 'fs'):
            return [float(v) for v in tval]
        else:
            raise RuntimeError("ONNX symbolic doesn't know to interpret Constant node")
    elif (value.node().kind() == 'prim::ListConstruct'):
        if (desc == 'is'):
            # Every list element must itself be a constant, otherwise the
            # attribute cannot be exported statically.
            for v in value.node().inputs():
                if (v.node().kind() != 'onnx::Constant'):
                    raise RuntimeError((("Failed to export an ONNX attribute '" + v.node().kind()) + "', since it's not constant, please try to make things (e.g., kernel size) static if possible"))
            return [int(v.node()['value']) for v in value.node().inputs()]
        else:
            raise RuntimeError("ONNX symbolic doesn't know to interpret ListConstruct node")
    raise RuntimeError('Unexpected node type: {}'.format(value.node().kind()))
def _maybe_get_const(value, desc):
    """Parse ``value`` per ``desc`` if it is an ``onnx::Constant`` node;
    otherwise return it unchanged."""
    if _is_value(value) and value.node().kind() == 'onnx::Constant':
        return _parse_arg(value, desc)
    return value
def _maybe_get_scalar(value):
    """Return the 0-dim constant tensor behind ``value`` when there is one;
    otherwise return ``value`` unchanged."""
    as_tensor = _maybe_get_const(value, 't')
    if isinstance(as_tensor, torch.Tensor) and as_tensor.shape == ():
        return as_tensor
    return value
def _get_const(value, desc, arg_name):
    """Parse ``value`` per ``desc``, requiring it to be a constant node.

    Raises:
        RuntimeError: When ``value`` is a non-constant TorchScript value.
    """
    is_non_constant_value = _is_value(value) and value.node().kind() not in (
        'onnx::Constant', 'prim::Constant')
    if is_non_constant_value:
        raise RuntimeError('ONNX symbolic expected a constant value of the {} argument, got `{}`'.format(arg_name, value))
    return _parse_arg(value, desc)
def _unpack_list(list_value):
    """Return the inputs of a ``prim::ListConstruct`` node as a Python list."""
    node = list_value.node()
    assert node.kind() == 'prim::ListConstruct'
    return list(node.inputs())
def _is_packed_list(list_value):
    """Check whether ``list_value`` is a ``prim::ListConstruct`` value."""
    if not _is_value(list_value):
        return False
    return list_value.node().kind() == 'prim::ListConstruct'
def parse_args(*arg_descriptors):
    """Decorator factory that parses a symbolic function's arguments.

    Each positional argument after the graph ``g`` is converted with
    :func:`_parse_arg` using the matching descriptor ('v', 'i', 'is', ...).
    The descriptors are also recorded on the function as
    ``_arg_descriptors``.
    """

    def decorator(fn):
        fn._arg_descriptors = arg_descriptors

        def wrapper(g, *args):
            # Fewer args than descriptors is fine (trailing optionals).
            assert len(arg_descriptors) >= len(args)
            parsed = [
                _parse_arg(arg, desc)
                for arg, desc in zip(args, arg_descriptors)
            ]
            return fn(g, *parsed)

        try:
            # Some callables (e.g. builtins) cannot be wrapped; keep the
            # bare wrapper in that case.
            wrapper = wraps(fn)(wrapper)
        except Exception:
            pass
        return wrapper

    return decorator
def _scalar(x): 'Convert a scalar tensor into a Python value.' assert (x.numel() == 1) return x.item()
def _if_scalar_type_as(g, self, tensor):
    """Cast ``self`` to ``tensor``'s scalar type when it is a plain scalar.

    TorchScript ``Value`` objects pass through untouched; when ``tensor``
    has no known scalar type, ``self`` is also returned as-is.
    """
    if isinstance(self, torch._C.Value):
        return self
    scalar_type = tensor.type().scalarType()
    if not scalar_type:
        return self
    # e.g. scalar_type 'Float' -> tensor.float()
    return getattr(self, scalar_type.lower())()
def _is_none(x):
    """Check whether the TorchScript value is statically known to be None."""
    return x.node().mustBeNone()
def _is_value(x): return isinstance(x, torch._C.Value)
def _is_tensor_list(x):
    """Check whether ``x`` is typed as a list of tensors."""
    return x.type().isSubtypeOf(ListType.ofTensors())
def _unimplemented(op, msg): warnings.warn((((('ONNX export failed on ' + op) + ' because ') + msg) + ' not supported'))
def _try_get_scalar_type(*args): for arg in args: try: return arg.type().scalarType() except RuntimeError: pass return None
def _topk_helper(g, input, k, dim, largest=True, sorted=False, out=None):
    """Emit an ONNX ``TopK`` node; the ``out`` parameter is unsupported."""
    if out is not None:
        _unimplemented('TopK', 'Out parameter is not supported')
    if _is_value(k):
        # Runtime k: force it into the 1-element tensor TopK expects.
        k = g.op('Reshape', k, g.op('Constant', value_t=torch.tensor([1])))
    else:
        k = g.op('Constant', value_t=torch.tensor([k], dtype=torch.int64))
    return g.op('TopK', input, k, axis_i=dim, largest_i=largest,
                sorted_i=sorted, outputs=2)
def _slice_helper(g, input, axes, starts, ends, steps=None, dynamic_slice=False):
    """Thin wrapper delegating to opset10's ``_slice`` implementation."""
    from torch.onnx.symbolic_opset10 import _slice
    return _slice(g, input, axes, starts, ends, steps, dynamic_slice)
def _unsqueeze_helper(g, input, dim):
    """Thin wrapper delegating to opset9's ``unsqueeze`` implementation."""
    from torch.onnx.symbolic_opset9 import unsqueeze
    return unsqueeze(g, input, dim)
def _interpolate_size_to_scales(g, input, output_size, dim):
    # Convert an interpolate ``output_size`` into a per-dimension ``scales``
    # tensor for ONNX Resize; the leading batch and channel dims get scale 1.
    output_size = _maybe_get_const(output_size, 'is')
    if _is_value(output_size):
        # Dynamic target size: compute scales = output_size / spatial_shape
        # in the graph.
        offset = 2
        offsets = g.op('Constant', value_t=torch.ones(offset, dtype=torch.float32))
        dividend = g.op('Cast', output_size, to_i=cast_pytorch_to_onnx['Float'])
        divisor = _slice_helper(g, g.op('Shape', input), axes=[0], ends=[maxsize], starts=[offset])
        divisor = g.op('Cast', divisor, to_i=cast_pytorch_to_onnx['Float'])
        scale_dims = g.op('Div', dividend, divisor)
        scales = g.op('Concat', offsets, scale_dims, axis_i=0)
    else:
        # Static target size: compute the float ratios directly in Python.
        scales_constant = [(1.0 if (i < 2) else (float(output_size[(- (dim - i))]) / float(input.type().sizes()[(- (dim - i))]))) for i in range(0, dim)]
        scales = g.op('Constant', value_t=torch.tensor(scales_constant, dtype=torch.float32))
    return scales
def _interpolate_get_scales_if_available(g, scales):
    # Extract explicit scale factors from interpolate args. Returns an ONNX
    # scales tensor prefixed with [1, 1] for the N/C dims, or None when no
    # usable scales were supplied (caller then falls back to output_size).
    if (len(scales) == 0):
        return None
    # 'fs' (list of floats) when the first entry is a list or multi-element
    # tensor, otherwise a single float 'f'.
    scale_desc = ('fs' if ((scales[0].type().kind() == 'ListType') or ((scales[0].type().kind() == 'TensorType') and (sum(scales[0].type().sizes()) > 1))) else 'f')
    # -1 is the sentinel for "no scale provided".
    available_scales = ((_maybe_get_const(scales[0], scale_desc) != (- 1)) and (not _is_none(scales[0])))
    if (not available_scales):
        return None
    offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if (scale_desc == 'fs'):
        scales_list = g.op('Constant', value_t=torch.tensor(_maybe_get_const(scales[0], scale_desc)))
        scales = g.op('Concat', offsets, scales_list, axis_i=0)
    else:
        # One scalar per spatial dim: unsqueeze to 1-D and cast to float
        # before concatenating everything into a single scales tensor.
        scales_list = []
        for scale in scales:
            unsqueezed_scale = _unsqueeze_helper(g, scale, 0)
            unsqueezed_scale = g.op('Cast', unsqueezed_scale, to_i=cast_pytorch_to_onnx['Float'])
            scales_list.append(unsqueezed_scale)
        scales = g.op('Concat', offsets, *scales_list, axis_i=0)
    return scales
def _get_interpolate_attributes(g, mode, args):
    """Split interpolate args into ``(scales, align_corners)``.

    'nearest' mode has no align_corners argument, so all of ``args`` are
    scales; otherwise the first arg is align_corners.
    """
    if mode == 'nearest':
        align_corners, scales = None, args[0:]
    else:
        align_corners, scales = args[0], args[1:]
    return _interpolate_get_scales_if_available(g, scales), align_corners
def _interpolate_get_scales(g, scale_factor, dim):
    """Build a full ONNX scales tensor from ``scale_factor``; the batch and
    channel dims are always given scale 1."""
    leading_ones = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if isinstance(scale_factor.type(), torch._C.ListType):
        return g.op('Concat', leading_ones, scale_factor, axis_i=0)
    # Single scalar: replicate it across all spatial dims.
    scale_factor = _unsqueeze_helper(g, scale_factor, 0)
    scale_factor = g.op('Cast', scale_factor, to_i=cast_pytorch_to_onnx['Float'])
    spatial = [scale_factor] * (dim - 2)
    return g.op('Concat', leading_ones, *spatial, axis_i=0)
def _size_helper(g, self, dim):
    """Select entry ``dim`` from ``Shape(self)`` in the graph."""
    from torch.onnx.symbolic_opset9 import select
    shape = g.op('Shape', self)
    axis_zero = g.op('Constant', value_t=torch.tensor([0]))
    return select(g, shape, axis_zero, dim)
def _avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name): if (divisor_override and (divisor_override.node().kind() != 'prim::Constant')): return _unimplemented(name, 'divisor_override') if (not stride): stride = kernel_size padding = tuple(tuple_fn(padding)) return padding
def _interpolate(name, dim, interpolate_mode):
    # Factory producing an ONNX symbolic function for interpolate with the
    # given mode ('nearest'/'linear'/'cubic'), lowered to the opset-11
    # Resize op.
    def symbolic_fn(g, input, output_size, *args):
        (scales, align_corners) = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        transformation_mode = ('asymmetric' if (interpolate_mode == 'nearest') else ('align_corners' if align_corners else 'pytorch_half_pixel'))
        empty_tensor = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
        if (scales is None):
            # No explicit scales: drive Resize by target size instead.
            if (('ONNX_BACKEND' in os.environ) and (os.environ['ONNX_BACKEND'] == 'TensorRT')):
                # TensorRT needs a fully static size tensor: fold the static
                # N/C dims together with the constant spatial output size.
                input_size = input.type().sizes()
                input_size = input_size[:2]
                output_size = sym_help._maybe_get_const(output_size, 'is')
                input_size.extend(output_size)
                output_size = g.op('Constant', value_t=torch.tensor(input_size, dtype=torch.int64))
            else:
                # Dynamic path: concat Shape(input)[:2] with the runtime
                # spatial output size.
                input_size = g.op('Shape', input)
                input_size_beg = sym_help._slice_helper(g, input_size, axes=[0], ends=[2], starts=[0])
                output_size = g.op('Cast', output_size, to_i=sym_help.cast_pytorch_to_onnx['Long'])
                output_size = g.op('Concat', input_size_beg, output_size, axis_i=0)
            # Resize still requires a scales input; pass an empty tensor.
            scales = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
            return g.op('Resize', input, empty_tensor, scales, output_size, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
        else:
            return g.op('Resize', input, empty_tensor, scales, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
    return symbolic_fn
@parse_args('v', 'v', 'i', 'i', 'i', 'none')
def topk(g, self, k, dim, largest, sorted, out=None):
    # Symbolic for ``torch.topk``; all the work happens in the shared helper.
    return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)
def masked_select(g, self, mask):
    """Symbolic for ``torch.masked_select``: broadcast the mask, locate the
    True positions with NonZero, and gather them via GatherND."""
    from torch.onnx.symbolic_opset9 import expand_as, nonzero
    broadcast_mask = expand_as(g, mask, self)
    index = nonzero(g, broadcast_mask)
    return g.op('GatherND', self, index)
def _prepare_onnx_paddings(g, dim, pad):
    # Convert PyTorch pad format (last dim first, interleaved begin/end
    # pairs) into ONNX Pad format (all begins first-dim-first, then all
    # ends), zero-filling dims the caller did not pad.
    pad_len = torch.onnx.symbolic_opset9.size(g, pad, g.op('Constant', value_t=torch.tensor([0])))
    # Number of zero entries needed so the pad list covers 2 * dim values.
    extension = g.op('Sub', g.op('Mul', g.op('Constant', value_t=torch.tensor(dim, dtype=torch.int64)), g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))), pad_len)
    pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    paddings = g.op('Concat', pad, g.op('ConstantOfShape', extension, value_t=torch.tensor([0], dtype=torch.int64)), axis_i=0)
    # Reshape to (dim, 2) pairs, flip the dim order, transpose so begins and
    # ends are grouped, then flatten back to a single 1-D list.
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1), 2])))
    paddings = g.op('Transpose', torch.onnx.symbolic_opset10.flip(g, paddings, [0]), perm_i=[1, 0])
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1)])))
    padding_c = g.op('Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    return padding_c
def constant_pad_nd(g, input, padding, value=None):
    """Symbolic for constant-mode ``F.pad``."""
    # Coerce the fill value to input's scalar type when it is a plain scalar.
    value = sym_help._maybe_get_scalar(value)
    value = sym_help._if_scalar_type_as(g, value, input)
    onnx_paddings = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, onnx_paddings, value, mode_s='constant')
def reflection_pad(g, input, padding):
    """Symbolic for reflection-mode ``F.pad``."""
    onnx_paddings = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, onnx_paddings, mode_s='reflect')
def _avg_pool(name, tuple_fn):
    # Factory producing an AveragePool symbolic; ``tuple_fn`` expands scalar
    # arguments to the pooling dimensionality (1d/2d/3d).
    @parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none')
    def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):
        padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)
        if (not stride):
            stride = kernel_size
        if count_include_pad:
            # ONNX AveragePool never counts padded cells in the divisor, so
            # pre-pad the input explicitly and pool with zero padding.
            input = g.op('Pad', input, g.op('Constant', value_t=torch.tensor(((((0,) * 2) + padding) * 2))), mode_s='constant')
            padding = ((0,) * len(padding))
        output = g.op('AveragePool', input, kernel_shape_i=tuple_fn(kernel_size), strides_i=tuple_fn(stride), pads_i=(padding * 2), ceil_mode_i=ceil_mode)
        return output
    return symbolic_fn
def _get_im2col_indices_along_dim(g, input_d, kernel_size_d, dilation_d, padding_d, stride_d):
    # Build the gather indices for im2col along one spatial dimension: a
    # (kernel_size_d, num_blocks) index matrix where row k holds the input
    # positions sampled by kernel tap k for every sliding-window start.
    blocks_d = g.op('Add', input_d, g.op('Constant', value_t=torch.tensor((padding_d * 2))))
    blocks_d = g.op('Sub', blocks_d, g.op('Constant', value_t=torch.tensor((dilation_d * (kernel_size_d - 1)))))
    # Sliding-window start positions.
    blocks_d_indices = g.op('Range', g.op('Constant', value_t=torch.tensor(0)), blocks_d, g.op('Constant', value_t=torch.tensor(stride_d)))
    # Dilated offsets of the kernel taps.
    kernel_grid = np.arange(0, (kernel_size_d * dilation_d), dilation_d)
    kernel_grid = g.op('Constant', value_t=torch.tensor([kernel_grid]))
    blocks_d_indices = g.op('Unsqueeze', blocks_d_indices, axes_i=[0])
    kernel_mask = g.op('Reshape', kernel_grid, g.op('Constant', value_t=torch.tensor([(- 1), 1])))
    # Broadcast add: (k, 1) + (1, blocks) -> (k, blocks).
    block_mask = g.op('Add', blocks_d_indices, kernel_mask)
    return block_mask
def _get_im2col_padded_input(g, input, padding_h, padding_w):
    """Zero-pad the input symmetrically along its H and W dims."""
    pads = g.op(
        'Constant',
        value_t=torch.LongTensor([0, 0, padding_h, padding_w] * 2))
    return g.op('Pad', input, pads)
def _get_im2col_output_shape(g, input, kernel_h, kernel_w):
    # Target shape (N, C * kernel_h * kernel_w, -1) for the final im2col
    # reshape, built in-graph from the input's N and C dims.
    batch_dim = size(g, input, g.op('Constant', value_t=torch.tensor(0)))
    channel_dim = size(g, input, g.op('Constant', value_t=torch.tensor(1)))
    channel_unfolded = g.op('Mul', channel_dim, g.op('Constant', value_t=torch.tensor((kernel_h * kernel_w))))
    return g.op('Concat', g.op('Unsqueeze', batch_dim, axes_i=[0]), g.op('Unsqueeze', channel_unfolded, axes_i=[0]), g.op('Constant', value_t=torch.tensor([(- 1)])), axis_i=0)
def size(g, self, dim=None):
    """Symbolic for ``Tensor.size``: the full shape when ``dim`` is None,
    otherwise the single dimension extracted by the shared helper."""
    if dim is None:
        return g.op('Shape', self)
    return sym_help._size_helper(g, self, dim)
@parse_args('v', 'is', 'is', 'is', 'is')
def im2col(g, input, kernel_size, dilation, padding, stride):
    """Symbolic for im2col (a.k.a. unfold) on 4-D NCHW inputs."""
    height = size(g, input, g.op('Constant', value_t=torch.tensor(2)))
    width = size(g, input, g.op('Constant', value_t=torch.tensor(3)))
    stride_h, stride_w = stride[0], stride[1]
    padding_h, padding_w = padding[0], padding[1]
    dilation_h, dilation_w = dilation[0], dilation[1]
    kernel_h, kernel_w = kernel_size[0], kernel_size[1]
    # Per-dimension gather indices into the padded input.
    row_indices = _get_im2col_indices_along_dim(g, height, kernel_h,
                                                dilation_h, padding_h,
                                                stride_h)
    col_indices = _get_im2col_indices_along_dim(g, width, kernel_w,
                                                dilation_w, padding_w,
                                                stride_w)
    target_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
    padded = _get_im2col_padded_input(g, input, padding_h, padding_w)
    # Gather rows along axis 2 and columns along the new axis 4, yielding
    # (N, C, kh, blocks_h, kw, blocks_w); the transpose groups the kernel
    # taps together before flattening into (N, C*kh*kw, L).
    gathered = g.op('Gather', padded, row_indices, axis_i=2)
    gathered = g.op('Gather', gathered, col_indices, axis_i=4)
    gathered = g.op('Transpose', gathered, perm_i=[0, 1, 2, 4, 3, 5])
    return g.op('Reshape', gathered, target_shape)
@parse_args('v', 'i')
def one_hot(g, self, num_classes):
    """Symbolic for aten::one_hot along the trailing axis."""
    # OneHot takes (indices, depth, [off_value, on_value]).
    off_on_values = g.op('Constant', value_t=torch.LongTensor([0, 1]))
    depth = g.op('Constant', value_t=torch.LongTensor([num_classes]))
    return g.op('OneHot', self, depth, off_on_values, axis_i=(- 1))
@parse_args('v', 'i', 'none')
def softmax(g, input, dim, dtype=None):
    """Symbolic for aten::softmax.

    Uses the native ONNX ``Softmax`` op only when the input rank is known
    and ``dim`` is the last axis (pre-opset-13 Softmax coerces the input to
    2-D at ``axis``, which is only correct in that case); otherwise emits a
    numerically-stable exp/sum decomposition.
    """
    input_dim = input.type().dim()
    if input_dim:
        # Rank is known: normalize a negative dim, then test for last-axis.
        if (dim < 0):
            dim = (input_dim + dim)
        if (input_dim == (dim + 1)):
            softmax = g.op('Softmax', input, axis_i=dim)
            if (dtype and (dtype.node().kind() != 'prim::Constant')):
                parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
                softmax = g.op(
                    'Cast', softmax,
                    to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
            return softmax
    # Fallback: softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))).
    # Subtracting the per-slice max avoids overflow in Exp.
    max_value = g.op('ReduceMax', input, axes_i=[dim], keepdims_i=1)
    input = g.op('Sub', input, max_value)
    exp = g.op('Exp', input)
    sum = g.op('ReduceSum', exp, axes_i=[dim])
    softmax = g.op('Div', exp, sum)
    if (dtype and (dtype.node().kind() != 'prim::Constant')):
        parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
        softmax = g.op('Cast', softmax,
                       to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    return softmax
def _adaptive_pool(name, type, tuple_fn, fn=None):
    """Create an ONNX symbolic for adaptive average/max pooling.

    Args:
        name (str): op name (kept for symmetry with other factories).
        type (str): ONNX pool op type, ``'AveragePool'`` or ``'MaxPool'``.
        tuple_fn (callable): expands ints into the op's tuple form.
        fn (callable, optional): regular max-pool symbolic delegated to for
            the non-global ``'MaxPool'`` case.
    """

    @parse_args('v', 'is')
    def symbolic_fn(g, input, output_size):
        # An all-ones output size is exactly global pooling and needs no
        # static input shape.
        if ((output_size == ([1] * len(output_size))) and (type == 'AveragePool')):
            return g.op('GlobalAveragePool', input)
        if (not input.isCompleteTensor()):
            if (output_size == ([1] * len(output_size))):
                # MaxPool symbolics return a (values, indices) pair;
                # indices are not exported.
                return (g.op('GlobalMaxPool', input), None)
            # The stride/kernel computation below needs concrete sizes.
            raise NotImplementedError('[Adaptive pool]:input size not accessible')
        dim = input.type().sizes()[2:]
        if ((output_size == ([1] * len(output_size))) and (type == 'MaxPool')):
            return (g.op('GlobalMaxPool', input), None)
        # Emulate adaptive pooling with a fixed stride and kernel. This is
        # exact only when each spatial dim divides evenly by its output dim.
        s = [int((dim[i] / output_size[i])) for i in range(0, len(dim))]
        k = [(dim[i] - ((output_size[i] - 1) * s[i])) for i in range(0, len(dim))]
        if (type == 'MaxPool'):
            return fn(g, input, k, k, ((0,) * len(dim)), ((1,) * len(dim)),
                      False)
        output = g.op(type, input,
                      kernel_shape_i=tuple_fn(k),
                      strides_i=tuple_fn(s),
                      ceil_mode_i=False)
        return output
    return symbolic_fn
def new_full(g, self, size, fill_value, dtype, layout, device,
             pin_memory=False):
    """Symbolic for aten::new_full.

    When ``dtype`` is not specified, inherit the scalar type from ``self``
    (only possible if its type is fully known), then delegate to the opset9
    ``full`` symbolic.
    """
    from torch.onnx.symbolic_opset9 import full
    if ((dtype is None) and self.isCompleteTensor()):
        dtype = self.type().scalarType()
        # Map the PyTorch scalar-type name to its ONNX dtype index.
        dtype = sym_help.scalar_type_to_onnx.index(
            sym_help.cast_pytorch_to_onnx[dtype])
    return full(g, size, fill_value, dtype, layout, device, pin_memory)
@parse_args('v', 'v', 'i', 'i', 'i')
def grid_sampler(g, input, grid, interpolation_mode, padding_mode,
                 align_corners=False):
    """Export aten::grid_sampler as the custom ``mmcv::grid_sampler`` op.

    This is not a standard ONNX op: running the exported model requires a
    backend that registers mmcv's custom-op implementation. The integer
    mode arguments are passed through unchanged from PyTorch's enums.
    """
    return g.op('mmcv::grid_sampler', input, grid,
                interpolation_mode_i=interpolation_mode,
                padding_mode_i=padding_mode,
                align_corners_i=align_corners)
@parse_args('v', 'i')
def cummax(g, input, dim):
    """Export aten::cummax as the custom ``mmcv::cummax`` op.

    ``outputs=2`` mirrors ``torch.cummax``'s (values, indices) pair.
    Requires a backend that implements mmcv's custom ops.
    """
    return g.op('mmcv::cummax', input, dim_i=dim, outputs=2)
@parse_args('v', 'i')
def cummin(g, input, dim):
    """Export aten::cummin as the custom ``mmcv::cummin`` op.

    ``outputs=2`` mirrors ``torch.cummin``'s (values, indices) pair.
    Requires a backend that implements mmcv's custom ops.
    """
    return g.op('mmcv::cummin', input, dim_i=dim, outputs=2)
@parse_args('v', 'v', 'is')
def roll(g, input, shifts, dims):
    """Symbolic for aten::roll with runtime (dynamic) shift values.

    When ``dims`` is empty, PyTorch rolls the flattened tensor; this is
    emulated by reshaping to (1, -1), rolling along dim 1, and restoring
    the original shape. Each (shift, dim) pair is lowered to two dynamic
    slices concatenated in swapped order.
    """
    from packaging import version
    from torch.onnx.symbolic_opset9 import squeeze
    input_shape = g.op('Shape', input)
    need_flatten = (len(dims) == 0)
    if need_flatten:
        # Remember the original shape, then flatten and roll along dim 1.
        resize_shape = input_shape
        input = g.op('Reshape', input,
                     g.op('Constant', value_t=torch.LongTensor([1, (- 1)])))
        input_shape = g.op('Shape', input)
        dims = [1]
    for (index, dim) in enumerate(dims):
        # Size of this dim and the corresponding runtime shift value.
        end_size = sym_help._slice_helper(g, input_shape, axes=[0],
                                          ends=[(dim + 1)], starts=[dim])
        shift_size = sym_help._slice_helper(g, shifts, axes=[0],
                                            ends=[(index + 1)],
                                            starts=[index])
        slice_size = g.op('Sub', end_size, shift_size)
        # Reduce the split point modulo the dim size so shifts outside
        # [0, end_size) still land inside the dim.
        div_size = g.op('Div', slice_size, end_size)
        slice_size = g.op('Sub', slice_size, g.op('Mul', end_size, div_size))
        if (version.parse(torch.__version__) >= version.parse('1.7.0')):
            # torch >= 1.7 requires an explicit squeeze axis.
            end_size = squeeze(g, end_size, 0)
            slice_size = squeeze(g, slice_size, 0)
        else:
            end_size = g.op('Squeeze', end_size)
            slice_size = g.op('Squeeze', slice_size)
        dim = torch.LongTensor([dim])
        # roll == concat(tail_slice, head_slice) along this dim.
        input_slice0 = sym_help._slice_helper(g, input, axes=dim,
                                              starts=torch.LongTensor([0]),
                                              ends=slice_size,
                                              dynamic_slice=True)
        input_slice1 = sym_help._slice_helper(g, input, axes=dim,
                                              ends=end_size,
                                              starts=slice_size,
                                              dynamic_slice=True)
        input = g.op('Concat', input_slice1, input_slice0, axis_i=dim)
    if need_flatten:
        input = g.op('Reshape', input, resize_shape)
    return input
def register_extra_symbolics(opset=11):
    """Register mmcv's extra ONNX symbolic functions for ``opset``.

    Emits a colored deprecation warning pointing users at MMDeploy, then
    registers every custom/overridden symbolic under the default domain.
    """
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    extra_symbolics = [
        ('one_hot', one_hot),
        ('im2col', im2col),
        ('topk', topk),
        ('softmax', softmax),
        ('constant_pad_nd', constant_pad_nd),
        ('reflection_pad1d', reflection_pad1d),
        ('reflection_pad2d', reflection_pad2d),
        ('reflection_pad3d', reflection_pad3d),
        ('avg_pool1d', avg_pool1d),
        ('avg_pool2d', avg_pool2d),
        ('avg_pool3d', avg_pool3d),
        ('adaptive_avg_pool1d', adaptive_avg_pool1d),
        ('adaptive_avg_pool2d', adaptive_avg_pool2d),
        ('adaptive_avg_pool3d', adaptive_avg_pool3d),
        ('masked_select', masked_select),
        ('upsample_nearest1d', upsample_nearest1d),
        ('upsample_nearest2d', upsample_nearest2d),
        ('upsample_nearest3d', upsample_nearest3d),
        ('upsample_linear1d', upsample_linear1d),
        ('upsample_bilinear2d', upsample_bilinear2d),
        ('upsample_trilinear3d', upsample_trilinear3d),
        ('upsample_bicubic2d', upsample_bicubic2d),
        ('new_full', new_full),
        ('grid_sampler', grid_sampler),
        ('cummax', cummax),
        ('cummin', cummin),
        ('roll', roll),
    ]
    # '' == default (aten) domain.
    for op_name, symbolic_fn in extra_symbolics:
        register_op(op_name, symbolic_fn, '', opset)
class ActiveRotatedFilterFunction(Function):
    """Encoding the orientation information and generating orientation-
    sensitive features.

    The details are described in the paper `Align Deep Features for Oriented
    Object Detection <https://arxiv.org/abs/2008.09397>_`.
    """

    @staticmethod
    def forward(ctx, input, indices):
        """
        Args:
            input (torch.Tensor): Input features with shape
                [num_output_planes, num_input_planes, num_orientations, H, W].
            indices (torch.Tensor): Indices with shape
                [num_orientations, H, W, num_rotations].

        Returns:
            torch.Tensor: Refined features with shape [num_output_planes *
            num_rotations, num_input_planes * num_orientations, H, W].
        """
        ctx.save_for_backward(input, indices)
        (op, ip, o, h, w) = input.size()
        (o, h, w, r) = indices.size()
        # Each of the r rotations produces a rotated copy of every filter.
        output = input.new_zeros(((op * r), (ip * o), h, w))
        ext_module.active_rotated_filter_forward(input, indices, output)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): The gradient of output features
                with shape [num_output_planes * num_rotations,
                num_input_planes * num_orientations, H, W].

        Returns:
            tuple: The gradient of input features with shape
            [num_output_planes, num_input_planes, num_orientations, H, W],
            and ``None`` for ``indices`` (not differentiable).
        """
        (input, indices) = ctx.saved_tensors
        grad_in = torch.zeros_like(input)
        ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)
        return (grad_in, None)
class AssignScoreWithK(Function):
    """Perform weighted sum to generate output features according to scores.
    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/paconv_lib/src/gpu>`_.

    This is a memory-efficient CUDA implementation of assign_scores operation,
    which first transform all point features with weight bank, then assemble
    neighbor features with ``knn_idx`` and perform weighted sum of ``scores``.

    See the `paper <https://arxiv.org/pdf/2103.14635.pdf>`_ appendix Sec. D
    for more detailed descriptions.

    Note:
        This implementation assumes using ``neighbor`` kernel input, which is
        (point_features - center_features, point_features).
        See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
        pointnet2/paconv.py#L128 for more details.
    """

    @staticmethod
    def forward(ctx, scores, point_features, center_features, knn_idx,
                aggregate='sum'):
        """
        Args:
            scores (torch.Tensor): (B, npoint, K, M), predicted scores to
                aggregate weight matrices in the weight bank.
                ``npoint`` is the number of sampled centers.
                ``K`` is the number of queried neighbors.
                ``M`` is the number of weight matrices in the weight bank.
            point_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed point features to be aggregated.
            center_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed center features to be aggregated.
            knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
                We assume the first idx in each row is the idx of the center.
            aggregate (str, optional): Aggregation method.
                Can be 'sum', 'avg' or 'max'. Defaults: 'sum'.

        Returns:
            torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
        """
        # Integer codes understood by the CUDA kernel.
        agg = {'sum': 0, 'avg': 1, 'max': 2}
        (B, N, M, out_dim) = point_features.size()
        (_, npoint, K, _) = scores.size()
        output = point_features.new_zeros((B, out_dim, npoint, K))
        ext_module.assign_score_withk_forward(point_features.contiguous(),
                                              center_features.contiguous(),
                                              scores.contiguous(),
                                              knn_idx.contiguous(), output,
                                              B=B, N0=N, N1=npoint, M=M, K=K,
                                              O=out_dim,
                                              aggregate=agg[aggregate])
        # ``output`` is saved because the backward kernel consumes it too.
        ctx.save_for_backward(output, point_features, center_features,
                              scores, knn_idx)
        ctx.agg = agg[aggregate]
        return output

    @staticmethod
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): (B, out_dim, npoint, K)

        Returns:
            tuple[torch.Tensor]: A tuple contains five elements. The first one
            is the gradient of ``scores`` whose shape is (B, npoint, K, M).
            The second is the gradient of ``point_features`` whose shape is
            (B, N, M, out_dim). The third is the gradient of
            ``center_features`` with the shape of (B, N, M, out_dim). The
            last two are ``None`` (``knn_idx`` and ``aggregate`` take no
            gradient).
        """
        (_, point_features, center_features, scores, knn_idx) = ctx.saved_tensors
        agg = ctx.agg
        (B, N, M, out_dim) = point_features.size()
        (_, npoint, K, _) = scores.size()
        grad_point_features = point_features.new_zeros(point_features.shape)
        grad_center_features = center_features.new_zeros(center_features.shape)
        grad_scores = scores.new_zeros(scores.shape)
        ext_module.assign_score_withk_backward(grad_out.contiguous(),
                                               point_features.contiguous(),
                                               center_features.contiguous(),
                                               scores.contiguous(),
                                               knn_idx.contiguous(),
                                               grad_point_features,
                                               grad_center_features,
                                               grad_scores, B=B, N0=N,
                                               N1=npoint, M=M, K=K, O=out_dim,
                                               aggregate=agg)
        return (grad_scores, grad_point_features, grad_center_features, None,
                None)
class BallQuery(Function):
    """Find nearby points in spherical space."""

    @staticmethod
    def forward(ctx, min_radius: float, max_radius: float, sample_num: int,
                xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
        """
        Args:
            min_radius (float): minimum radius of the balls.
            max_radius (float): maximum radius of the balls.
            sample_num (int): maximum number of features in the balls.
            xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            center_xyz (torch.Tensor): (B, npoint, 3) centers of the ball
                query.

        Returns:
            torch.Tensor: (B, npoint, nsample) tensor with the indices of
            the features that form the query balls.
        """
        assert center_xyz.is_contiguous()
        assert xyz.is_contiguous()
        assert (min_radius < max_radius)
        (B, N, _) = xyz.size()
        npoint = center_xyz.size(1)
        idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)
        ext_module.ball_query_forward(center_xyz, xyz, idx, b=B, n=N,
                                      m=npoint, min_radius=min_radius,
                                      max_radius=max_radius,
                                      nsample=sample_num)
        # parrots does not support mark_non_differentiable.
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Index output carries no gradient.
        return (None, None, None, None)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
    """Calculate overlap between two set of bboxes.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    Args:
        bboxes1 (torch.Tensor): shape (m, 4) in <x1, y1, x2, y2> format or
            empty.
        bboxes2 (torch.Tensor): shape (n, 4) in <x1, y1, x2, y2> format or
            empty. If aligned is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union) or iof (intersection
            over foreground).
        aligned (bool): If True, compute per-pair ious (m must equal n)
            instead of the full m x n matrix. Default: False.
        offset (int): Offset added to width/height when computing areas;
            must be 0 or 1 (1 treats coordinates as inclusive pixels).
            Default: 0.

    Returns:
        torch.Tensor: Return the ious betweens boxes. If ``aligned`` is
        ``False``, the shape of ious is (m, n) else (m, 1).

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> bbox_overlaps(bboxes1, bboxes2)
        tensor([[0.5000, 0.0000, 0.0000],
                [0.0000, 0.0000, 1.0000],
                [0.0000, 0.0000, 0.0000]])

    Example:
        >>> empty = torch.FloatTensor([])
        >>> nonempty = torch.FloatTensor([
        >>>     [0, 0, 10, 9],
        >>> ])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    mode_dict = {'iou': 0, 'iof': 1}
    assert (mode in mode_dict.keys())
    mode_flag = mode_dict[mode]
    assert ((bboxes1.size((- 1)) == 4) or (bboxes1.size(0) == 0))
    assert ((bboxes2.size((- 1)) == 4) or (bboxes2.size(0) == 0))
    assert ((offset == 1) or (offset == 0))
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if aligned:
        assert (rows == cols)
    # Empty inputs short-circuit to a correctly-shaped empty result.
    if ((rows * cols) == 0):
        return (bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols))
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        ious = bboxes1.new_zeros((rows, cols))
    ext_module.bbox_overlaps(bboxes1, bboxes2, ious, mode=mode_flag,
                             aligned=aligned, offset=offset)
    return ious
class BorderAlignFunction(Function):
    """Autograd wrapper for the border_align CUDA op (see BorderAlign)."""

    @staticmethod
    def symbolic(g, input, boxes, pool_size):
        return g.op('mmcv::MMCVBorderAlign', input, boxes,
                    pool_size_i=pool_size)

    @staticmethod
    def forward(ctx, input, boxes, pool_size):
        ctx.pool_size = pool_size
        ctx.input_shape = input.size()
        assert (boxes.ndim == 3), 'boxes must be with shape [B, H*W, 4]'
        assert (boxes.size(2) == 4), 'the last dimension of boxes must be (x1, y1, x2, y2)'
        # One channel group per border (top, left, bottom, right).
        assert ((input.size(1) % 4) == 0), 'the channel for input feature must be divisible by factor 4'
        # [B, C]
        output_shape = (input.size(0), (input.size(1) // 4), boxes.size(1), 4)
        output = input.new_zeros(output_shape)
        # argmax_idx records which sampled position won the max pooling;
        # it is needed to route gradients in backward.
        argmax_idx = input.new_zeros(output_shape).to(torch.int)
        ext_module.border_align_forward(input, boxes, output, argmax_idx,
                                        pool_size=ctx.pool_size)
        ctx.save_for_backward(boxes, argmax_idx)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (boxes, argmax_idx) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        # The CUDA kernel requires a contiguous gradient tensor.
        grad_output = grad_output.contiguous()
        ext_module.border_align_backward(grad_output, boxes, argmax_idx,
                                         grad_input,
                                         pool_size=ctx.pool_size)
        # boxes and pool_size take no gradient.
        return (grad_input, None, None)
class BorderAlign(nn.Module):
    """Border align pooling layer.

    Applies border_align over the input feature based on predicted bboxes.
    Introduced in `BorderDet: Border Feature for Dense Object Detection
    <https://arxiv.org/abs/2007.11056>`_.

    For every border line (top, left, bottom or right) of every box it:

    1. uniformly samples ``pool_size`` +1 positions along the line,
       including both endpoints;
    2. reads the feature at each position via bilinear interpolation;
    3. max-pools over the ``pool_size`` +1 sampled values.

    Args:
        pool_size (int): number of positions sampled over the boxes'
            borders (e.g. top, bottom, left, right).
    """

    def __init__(self, pool_size):
        super(BorderAlign, self).__init__()
        self.pool_size = pool_size

    def forward(self, input, boxes):
        """
        Args:
            input: Features with shape [N,4C,H,W]. Channels ranged in
                [0,C), [C,2C), [2C,3C), [3C,4C) represent the top, left,
                bottom, right features respectively.
            boxes: Boxes with shape [N,H*W,4]. Coordinate format
                (x1,y1,x2,y2).

        Returns:
            torch.Tensor: Pooled features with shape [N,C,H*W,4]. The order
            is (top,left,bottom,right) for the last dimension.
        """
        return border_align(input, boxes, self.pool_size)

    def __repr__(self):
        return f'{self.__class__.__name__}(pool_size={self.pool_size})'
def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False,
                    clockwise=True):
    r"""Return intersection-over-union (Jaccard index) of boxes.

    Both sets of boxes are expected to be in
    (x_center, y_center, width, height, angle) format.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    .. note::
        The operator assumes:

        1) The positive direction along x axis is left -> right.

        2) The positive direction along y axis is top -> down.

        3) The w border is in parallel with x axis when angle = 0.

        However, there are 2 opposite definitions of the positive angular
        direction, clockwise (CW) and counter-clockwise (CCW). MMCV supports
        both definitions and uses CW by default.

        Please set ``clockwise=False`` if you are using the CCW definition.

        The coordinate system when ``clockwise`` is ``True`` (default)

        .. code-block:: none

            0-------------------> x (0 rad)
            |  A-------------B
            |  |             |
            |  |     box     h
            |  |   angle=0   |
            |  D------w------C
            v
            y (pi/2 rad)

        In such coordination system the rotation matrix is

        .. math::
            \begin{pmatrix}
            \cos\alpha & -\sin\alpha \\
            \sin\alpha & \cos\alpha
            \end{pmatrix}

        The coordinates of the corner point A can be calculated as:

        .. math::
            P_A=
            \begin{pmatrix} x_A \\ y_A\end{pmatrix}
            =
            \begin{pmatrix} x_{center} \\ y_{center}\end{pmatrix} +
            \begin{pmatrix}\cos\alpha & -\sin\alpha \\
            \sin\alpha & \cos\alpha\end{pmatrix}
            \begin{pmatrix} -0.5w \\ -0.5h\end{pmatrix} \\
            =
            \begin{pmatrix} x_{center}-0.5w\cos\alpha+0.5h\sin\alpha
            \\
            y_{center}-0.5w\sin\alpha-0.5h\cos\alpha\end{pmatrix}

        The coordinate system when ``clockwise`` is ``False``

        .. code-block:: none

            0-------------------> x (0 rad)
            |  A-------------B
            |  |             |
            |  |     box     h
            |  |   angle=0   |
            |  D------w------C
            v
            y (-pi/2 rad)

        In such coordination system the rotation matrix is

        .. math::
            \begin{pmatrix}
            \cos\alpha & \sin\alpha \\
            -\sin\alpha & \cos\alpha
            \end{pmatrix}

        The coordinates of the corner point A can be calculated as:

        .. math::
            P_A=
            \begin{pmatrix} x_A \\ y_A\end{pmatrix}
            =
            \begin{pmatrix} x_{center} \\ y_{center}\end{pmatrix} +
            \begin{pmatrix}\cos\alpha & \sin\alpha \\
            -\sin\alpha & \cos\alpha\end{pmatrix}
            \begin{pmatrix} -0.5w \\ -0.5h\end{pmatrix} \\
            =
            \begin{pmatrix} x_{center}-0.5w\cos\alpha-0.5h\sin\alpha
            \\
            y_{center}+0.5w\sin\alpha-0.5h\cos\alpha\end{pmatrix}

    Args:
        bboxes1 (torch.Tensor): rotated bboxes 1. It has shape (N, 5),
            indicating (x, y, w, h, theta) for each row. Note that theta is
            in radian.
        bboxes2 (torch.Tensor): rotated bboxes 2. It has shape (M, 5),
            indicating (x, y, w, h, theta) for each row. Note that theta is
            in radian.
        mode (str): "iou" (intersection over union) or iof (intersection
            over foreground).
        aligned (bool): If True, compute per-pair ious (N must equal M).
            Default: False.
        clockwise (bool): flag indicating whether the positive angular
            orientation is clockwise. default True.
            `New in version 1.4.3.`

    Returns:
        torch.Tensor: Return the ious betweens boxes. If ``aligned`` is
        ``False``, the shape of ious is (N, M) else (N,).
    """
    assert (mode in ['iou', 'iof'])
    mode_dict = {'iou': 0, 'iof': 1}
    mode_flag = mode_dict[mode]
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    # The kernel fills a flat buffer; it is reshaped to (rows, cols) below.
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        ious = bboxes1.new_zeros((rows * cols))
    if (not clockwise):
        # Convert CCW boxes to CW by negating the angle (last column).
        flip_mat = bboxes1.new_ones(bboxes1.shape[(- 1)])
        flip_mat[(- 1)] = (- 1)
        bboxes1 = (bboxes1 * flip_mat)
        bboxes2 = (bboxes2 * flip_mat)
    bboxes1 = bboxes1.contiguous()
    bboxes2 = bboxes2.contiguous()
    ext_module.box_iou_rotated(bboxes1, bboxes2, ious, mode_flag=mode_flag,
                               aligned=aligned)
    if (not aligned):
        ious = ious.view(rows, cols)
    return ious
class CARAFENaiveFunction(Function):
    """Autograd wrapper for the naive (unoptimized) CARAFE CUDA op."""

    @staticmethod
    def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
        return g.op('mmcv::MMCVCARAFENaive', features, masks,
                    kernel_size_i=kernel_size, group_size_i=group_size,
                    scale_factor_f=scale_factor)

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # One kernel_size^2 reassembly kernel per group per output position.
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel_size must be odd and positive.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor),
                                     (w * scale_factor)))
        ext_module.carafe_naive_forward(features, masks, output,
                                        kernel_size=kernel_size,
                                        group_size=group_size,
                                        scale_factor=scale_factor)
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # The naive kernel is CUDA-only.
        assert grad_output.is_cuda
        (features, masks) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        grad_input = torch.zeros_like(features)
        grad_masks = torch.zeros_like(masks)
        ext_module.carafe_naive_backward(grad_output.contiguous(), features,
                                         masks, grad_input, grad_masks,
                                         kernel_size=kernel_size,
                                         group_size=group_size,
                                         scale_factor=scale_factor)
        # The three int hyper-parameters take no gradient.
        return (grad_input, grad_masks, None, None, None)
class CARAFENaive(Module):
    """Module wrapper around the naive CARAFE op (see ``carafe_naive``)."""

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFENaive, self).__init__()
        assert (isinstance(kernel_size, int) and isinstance(group_size, int)
                and isinstance(scale_factor, int))
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        """Upsample ``features`` guided by the reassembly ``masks``."""
        return carafe_naive(features, masks, self.kernel_size,
                            self.group_size, self.scale_factor)
class CARAFEFunction(Function):
    """Autograd wrapper for the optimized CARAFE CUDA op.

    Compared to :class:`CARAFENaiveFunction`, the kernel uses extra scratch
    buffers (``rfeatures``/``routput``/``rmasks``) holding rearranged
    copies of the tensors for coalesced memory access.
    """

    @staticmethod
    def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
        return g.op('mmcv::MMCVCARAFE', features, masks,
                    kernel_size_i=kernel_size, group_size_i=group_size,
                    scale_factor_f=scale_factor)

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # One kernel_size^2 reassembly kernel per group per output position.
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel_size must be odd and positive.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor),
                                     (w * scale_factor)))
        # Scratch buffers used internally by the CUDA kernel.
        routput = features.new_zeros(output.size(), requires_grad=False)
        rfeatures = features.new_zeros(features.size(), requires_grad=False)
        rmasks = masks.new_zeros(masks.size(), requires_grad=False)
        ext_module.carafe_forward(features, masks, rfeatures, routput,
                                  rmasks, output, kernel_size=kernel_size,
                                  group_size=group_size,
                                  scale_factor=scale_factor)
        if (features.requires_grad or masks.requires_grad):
            # rfeatures is reused by the backward kernel.
            ctx.save_for_backward(features, masks, rfeatures)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # The optimized kernel is CUDA-only.
        assert grad_output.is_cuda
        (features, masks, rfeatures) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        # Scratch buffers for the backward kernel.
        rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input = torch.zeros_like(features, requires_grad=False)
        rgrad_masks = torch.zeros_like(masks, requires_grad=False)
        grad_input = torch.zeros_like(features, requires_grad=False)
        grad_masks = torch.zeros_like(masks, requires_grad=False)
        ext_module.carafe_backward(grad_output.contiguous(), rfeatures,
                                   masks, rgrad_output, rgrad_input_hs,
                                   rgrad_input, rgrad_masks, grad_input,
                                   grad_masks, kernel_size=kernel_size,
                                   group_size=group_size,
                                   scale_factor=scale_factor)
        # The three int hyper-parameters take no gradient.
        return (grad_input, grad_masks, None, None, None)
class CARAFE(Module):
    """CARAFE: Content-Aware ReAssembly of FEatures

    Please refer to `CARAFE: Content-Aware ReAssembly of FEatures
    <https://arxiv.org/abs/1905.02188>`_ for more details.

    Args:
        kernel_size (int): reassemble kernel size
        group_size (int): reassemble group size
        scale_factor (int): upsample ratio

    Returns:
        upsampled feature map
    """

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFE, self).__init__()
        assert (isinstance(kernel_size, int) and isinstance(group_size, int)
                and isinstance(scale_factor, int))
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        """Upsample ``features`` guided by the reassembly ``masks``."""
        return carafe(features, masks, self.kernel_size, self.group_size,
                      self.scale_factor)
@UPSAMPLE_LAYERS.register_module(name='carafe')
class CARAFEPack(nn.Module):
    """A unified package of CARAFE upsampler that contains: 1) channel
    compressor 2) content encoder 3) CARAFE op.

    Official implementation of ICCV 2019 paper
    `CARAFE: Content-Aware ReAssembly of FEatures
    <https://arxiv.org/abs/1905.02188>`_.

    Args:
        channels (int): input feature channels
        scale_factor (int): upsample ratio
        up_kernel (int): kernel size of CARAFE op
        up_group (int): group size of CARAFE op
        encoder_kernel (int): kernel size of content encoder
        encoder_dilation (int): dilation of content encoder
        compressed_channels (int): output channels of channels compressor

    Returns:
        upsampled feature map
    """

    def __init__(self, channels, scale_factor, up_kernel=5, up_group=1,
                 encoder_kernel=3, encoder_dilation=1,
                 compressed_channels=64):
        super(CARAFEPack, self).__init__()
        self.channels = channels
        self.scale_factor = scale_factor
        self.up_kernel = up_kernel
        self.up_group = up_group
        self.encoder_kernel = encoder_kernel
        self.encoder_dilation = encoder_dilation
        self.compressed_channels = compressed_channels
        # 1x1 conv reduces channels before kernel prediction (cheaper).
        self.channel_compressor = nn.Conv2d(channels,
                                            self.compressed_channels, 1)
        # Predicts up_kernel^2 reassembly weights per group for every
        # position of the scale_factor^2-times larger output map.
        self.content_encoder = nn.Conv2d(
            self.compressed_channels,
            ((((self.up_kernel * self.up_kernel) * self.up_group) *
              self.scale_factor) * self.scale_factor),
            self.encoder_kernel,
            padding=int((((self.encoder_kernel - 1) *
                          self.encoder_dilation) / 2)),
            dilation=self.encoder_dilation,
            groups=1)
        self.init_weights()

    def init_weights(self):
        """Xavier-init convs; the kernel predictor gets small normal init."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
        normal_init(self.content_encoder, std=0.001)

    def kernel_normalizer(self, mask):
        """Rearrange predicted kernels to output resolution and softmax-
        normalize each up_kernel^2 reassembly kernel."""
        mask = F.pixel_shuffle(mask, self.scale_factor)
        (n, mask_c, h, w) = mask.size()
        mask_channel = int((mask_c / float((self.up_kernel ** 2))))
        mask = mask.view(n, mask_channel, (- 1), h, w)
        mask = F.softmax(mask, dim=2, dtype=mask.dtype)
        mask = mask.view(n, mask_c, h, w).contiguous()
        return mask

    def feature_reassemble(self, x, mask):
        """Apply the CARAFE op with the normalized reassembly kernels."""
        x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)
        return x

    def forward(self, x):
        # compress -> predict kernels -> normalize -> reassemble.
        compressed_x = self.channel_compressor(x)
        mask = self.content_encoder(compressed_x)
        mask = self.kernel_normalizer(mask)
        x = self.feature_reassemble(x, mask)
        return x
def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area,
                   kernel_num):
    """Expand kernel contours so that foreground pixels are assigned into
    instances.

    Args:
        kernel_mask (np.array or torch.Tensor): The instance kernel mask
            with size hxw.
        internal_kernel_label (np.array or torch.Tensor): The instance
            internal kernel label with size hxw.
        min_kernel_area (int): The minimum kernel area.
        kernel_num (int): The instance kernel number.

    Returns:
        list: The instance index map with size hxw.
    """
    assert isinstance(kernel_mask, (torch.Tensor, np.ndarray))
    assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(min_kernel_area, int)
    assert isinstance(kernel_num, int)

    # The extension consumes tensors; accept numpy arrays for convenience.
    if isinstance(kernel_mask, np.ndarray):
        kernel_mask = torch.from_numpy(kernel_mask)
    if isinstance(internal_kernel_label, np.ndarray):
        internal_kernel_label = torch.from_numpy(internal_kernel_label)

    if (torch.__version__ == 'parrots'):
        # parrots cannot handle empty tensors in the extension call.
        if ((kernel_mask.shape[0] == 0)
                or (internal_kernel_label.shape[0] == 0)):
            return []
        expanded = ext_module.contour_expand(kernel_mask,
                                             internal_kernel_label,
                                             min_kernel_area=min_kernel_area,
                                             kernel_num=kernel_num)
        return expanded.tolist()
    return ext_module.contour_expand(kernel_mask, internal_kernel_label,
                                     min_kernel_area, kernel_num)
class TopPoolFunction(Function):
    """Autograd wrapper for the top corner-pooling CUDA op."""

    @staticmethod
    def symbolic(g, input):
        return g.op('mmcv::MMCVCornerPool', input,
                    mode_i=int(_mode_dict['top']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.top_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        return ext_module.top_pool_backward(input, grad_output)
class BottomPoolFunction(Function):
    """Autograd wrapper for the bottom corner-pooling CUDA op."""

    @staticmethod
    def symbolic(g, input):
        return g.op('mmcv::MMCVCornerPool', input,
                    mode_i=int(_mode_dict['bottom']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.bottom_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        return ext_module.bottom_pool_backward(input, grad_output)
class LeftPoolFunction(Function):
    """Autograd wrapper for the left corner-pooling CUDA op."""

    @staticmethod
    def symbolic(g, input):
        return g.op('mmcv::MMCVCornerPool', input,
                    mode_i=int(_mode_dict['left']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.left_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        return ext_module.left_pool_backward(input, grad_output)
class RightPoolFunction(Function):
    """Autograd wrapper for the right corner-pooling CUDA op."""

    @staticmethod
    def symbolic(g, input):
        return g.op('mmcv::MMCVCornerPool', input,
                    mode_i=int(_mode_dict['right']))

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return ext_module.right_pool_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        return ext_module.right_pool_backward(input, grad_output)
def _parse_torch_version(version_str):
    """Parse the leading numeric fields of a version string into an int tuple.

    Keeps only the leading digits of each of the first three dot-separated
    fields and drops any local-version label, so strings such as
    ``'1.8.0+cu111'`` or ``'1.7.0a0'`` parse to ``(1, 8, 0)`` / ``(1, 7, 0)``.
    A fully non-numeric string (e.g. ``'parrots'``) yields an empty tuple.
    """
    fields = []
    for field in version_str.split('+')[0].split('.')[:3]:
        digits = ''
        for char in field:
            if not char.isdigit():
                break
            digits += char
        if not digits:
            break
        fields.append(int(digits))
    return tuple(fields)


class CornerPool(nn.Module):
    """Corner Pooling.

    Corner Pooling is a new type of pooling layer that helps a
    convolutional network better localize corners of bounding boxes.

    Please refer to `CornerNet: Detecting Objects as Paired Keypoints
    <https://arxiv.org/abs/1808.01244>`_ for more details.

    Code is modified from https://github.com/princeton-vl/CornerNet-Lite.

    Args:
        mode (str): Pooling orientation for the pooling layer

            - 'bottom': Bottom Pooling
            - 'left': Left Pooling
            - 'right': Right Pooling
            - 'top': Top Pooling

    Returns:
        Feature map after pooling.
    """

    pool_functions = {
        'bottom': BottomPoolFunction,
        'left': LeftPoolFunction,
        'right': RightPoolFunction,
        'top': TopPoolFunction
    }

    # (dimension to scan along, whether to flip before/after cummax)
    cummax_dim_flip = {
        'bottom': (2, False),
        'left': (3, True),
        'right': (3, False),
        'top': (2, True)
    }

    def __init__(self, mode):
        super(CornerPool, self).__init__()
        assert mode in self.pool_functions
        self.mode = mode
        self.corner_pool = self.pool_functions[mode]

    def forward(self, x):
        # BUGFIX: the version gate previously compared ``torch.__version__``
        # as a raw string, which is wrong lexicographically for torch >= 1.10
        # ('1.10.0' < '1.5.0' as strings) and would both skip the native
        # ``cummax`` path and wrongly fail the ONNX version assert below.
        # Compare parsed numeric tuples instead.
        if (torch.__version__ != 'parrots'
                and _parse_torch_version(torch.__version__) >= (1, 5)):
            if torch.onnx.is_in_onnx_export():
                assert _parse_torch_version(torch.__version__) >= (1, 7), (
                    'When `cummax` serves as an intermediate component whose '
                    "outputs is used as inputs for another modules, it's "
                    'expected that pytorch version must be >= 1.7.0, '
                    'otherwise Error appears like: `RuntimeError: tuple '
                    'appears in op that does not forward tuples, unsupported '
                    'kind: prim::PythonOp`.')
            dim, flip = self.cummax_dim_flip[self.mode]
            # cummax scans toward increasing index; flip first (and flip the
            # result back) for the orientations that scan the other way.
            if flip:
                x = x.flip(dim)
            pool_tensor, _ = torch.cummax(x, dim=dim)
            if flip:
                pool_tensor = pool_tensor.flip(dim)
            return pool_tensor
        else:
            # Old torch / parrots: fall back to the compiled extension.
            return self.corner_pool.apply(x)
class CorrelationFunction(Function):
    """Autograd function bridging the correlation extension ops."""

    @staticmethod
    def forward(ctx,
                input1,
                input2,
                kernel_size=1,
                max_displacement=1,
                stride=1,
                padding=1,
                dilation=1,
                dilation_patch=1):
        # Keep the raw inputs for the backward pass.
        ctx.save_for_backward(input1, input2)

        # Normalize scalar hyper-parameters to (h, w) pairs and stash them on
        # the ctx so backward and _output_size can reuse them.
        kH, kW = ctx.kernel_size = _pair(kernel_size)
        patch_size = max_displacement * 2 + 1
        ctx.patch_size = patch_size
        dH, dW = ctx.stride = _pair(stride)
        padH, padW = ctx.padding = _pair(padding)
        dilationH, dilationW = ctx.dilation = _pair(dilation)
        dilation_patchH, dilation_patchW = ctx.dilation_patch = \
            _pair(dilation_patch)

        output = input1.new_zeros(
            CorrelationFunction._output_size(ctx, input1))
        ext_module.correlation_forward(
            input1,
            input2,
            output,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input1, input2 = ctx.saved_tensors
        kH, kW = ctx.kernel_size
        patch_size = ctx.patch_size
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        dilation_patchH, dilation_patchW = ctx.dilation_patch
        dH, dW = ctx.stride
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)

        ext_module.correlation_backward(
            grad_output,
            input1,
            input2,
            grad_input1,
            grad_input2,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)
        # Only the two tensor inputs get gradients; the six int
        # hyper-parameters are non-differentiable.
        return grad_input1, grad_input2, None, None, None, None, None, None

    @staticmethod
    def _output_size(ctx, input1):
        """Return (N, patch, patch, oH, oW) for the correlation output."""
        batch_size = input1.size(0)
        in_h, in_w = input1.size(2), input1.size(3)
        kH, kW = ctx.kernel_size
        dH, dW = ctx.stride
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        # Standard conv output-size arithmetic with a dilated kernel.
        dilated_kh = (kH - 1) * dilationH + 1
        dilated_kw = (kW - 1) * dilationW + 1
        out_h = int((in_h + 2 * padH - dilated_kh) / dH + 1)
        out_w = int((in_w + 2 * padW - dilated_kw) / dW + 1)
        return (batch_size, ctx.patch_size, ctx.patch_size, out_h, out_w)
class Correlation(nn.Module):
    r"""Correlation operator

    This correlation operator works for optical flow correlation computation.

    There are two batched tensors with shape :math:`(N, C, H, W)`,
    and the correlation output's shape is :math:`(N, max\_displacement \times
    2 + 1, max\_displacement * 2 + 1, H_{out}, W_{out})`

    where

    .. math::
        H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding -
            dilation \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    .. math::
        W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation
            \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding
    window convolution between input1 and shifted input2,

    .. math::
        Corr(N_i, dx, dy) =
        \sum_{c=0}^{C-1}
        input1(N_i, c) \star
        \mathcal{S}(input2(N_i, c), dy, dx)

    where :math:`\star` is the valid 2d sliding window convolution operator,
    and :math:`\mathcal{S}` means shifting the input features (auto-complete
    zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy
    \in [-max\_displacement \times dilation\_patch, max\_displacement \times
    dilation\_patch]`.

    Args:
        kernel_size (int): The size of sliding window i.e. local neighborhood
            representing the center points and involved in correlation
            computation. Defaults to 1.
        max_displacement (int): The radius for computing correlation volume,
            but the actual working space can be dilated by dilation_patch.
            Defaults to 1.
        stride (int): The stride of the sliding blocks in the input spatial
            dimensions. Defaults to 1.
        padding (int): Zero padding added to all four sides of the input1.
            Defaults to 0.
        dilation (int): The spacing of local neighborhood that will involved
            in correlation. Defaults to 1.
        dilation_patch (int): The spacing between position need to compute
            correlation. Defaults to 1.
    """

    def __init__(self,
                 kernel_size: int = 1,
                 max_displacement: int = 1,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 dilation_patch: int = 1) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.dilation_patch = dilation_patch

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        """Compute the correlation volume of two feature maps."""
        return CorrelationFunction.apply(input1, input2, self.kernel_size,
                                         self.max_displacement, self.stride,
                                         self.padding, self.dilation,
                                         self.dilation_patch)

    def __repr__(self) -> str:
        # Mirror the constructor arguments so the repr is copy-pasteable.
        attrs = ('kernel_size', 'max_displacement', 'stride', 'padding',
                 'dilation', 'dilation_patch')
        args = ', '.join(f'{name}={getattr(self, name)}' for name in attrs)
        return f'{self.__class__.__name__}({args})'
class DeformRoIPoolFunction(Function):
    """Autograd function wrapping the ``deform_roi_pool`` extension ops."""

    @staticmethod
    def symbolic(g, input, rois, offset, output_size, spatial_scale,
                 sampling_ratio, gamma):
        # ONNX export node; attribute suffixes encode types (``_i`` int,
        # ``_f`` float).
        return g.op(
            'mmcv::MMCVDeformRoIPool',
            input,
            rois,
            offset,
            pooled_height_i=output_size[0],
            pooled_width_i=output_size[1],
            spatial_scale_f=spatial_scale,
            sampling_ratio_f=sampling_ratio,
            gamma_f=gamma)

    @staticmethod
    def forward(ctx,
                input,
                rois,
                offset,
                output_size,
                spatial_scale=1.0,
                sampling_ratio=0,
                gamma=0.1):
        """Pool ``input`` over ``rois``, optionally shifted by ``offset``.

        Args:
            input (Tensor): Input feature map.
            rois (Tensor): RoIs of shape (num_rois, 5), each row being
                (idx, x1, y1, x2, y2).
            offset (Tensor or None): Per-bin offsets; ``None`` disables the
                deformation.
            output_size (int or tuple): Pooled output size (h, w).
            spatial_scale (float): Scale from input coords to RoI coords.
            sampling_ratio (int): Sampling points per bin.
            gamma (float): Offset scaling factor.

        Returns:
            Tensor: Pooled features of shape
            (num_rois, C, output_size[0], output_size[1]).
        """
        if (offset is None):
            # An empty tensor signals "no offset" to the extension op.
            offset = input.new_zeros(0)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = float(spatial_scale)
        ctx.sampling_ratio = int(sampling_ratio)
        ctx.gamma = float(gamma)

        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'

        output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
                        ctx.output_size[1])
        output = input.new_zeros(output_shape)

        ext_module.deform_roi_pool_forward(
            input,
            rois,
            offset,
            output,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            gamma=ctx.gamma)

        ctx.save_for_backward(input, rois, offset)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, rois, offset) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(input.shape)
        grad_offset = grad_output.new_zeros(offset.shape)

        ext_module.deform_roi_pool_backward(
            grad_output,
            input,
            rois,
            offset,
            grad_input,
            grad_offset,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            gamma=ctx.gamma)
        if (grad_offset.numel() == 0):
            # Offset was the empty "no offset" sentinel; report no gradient.
            grad_offset = None
        # Gradients flow to ``input`` and (when present) ``offset`` only.
        return (grad_input, None, grad_offset, None, None, None, None)
class DeformRoIPool(nn.Module):
    """Module front-end for deformable RoI pooling.

    Stores the pooling hyper-parameters and forwards to ``deform_roi_pool``.
    """

    def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0,
                 gamma=0.1):
        super().__init__()
        # Normalize/coerce hyper-parameters once at construction time.
        self.gamma = float(gamma)
        self.sampling_ratio = int(sampling_ratio)
        self.spatial_scale = float(spatial_scale)
        self.output_size = _pair(output_size)

    def forward(self, input, rois, offset=None):
        """Pool ``input`` over ``rois`` with optional per-bin ``offset``."""
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio,
                               self.gamma)
class DeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with a learned offset branch.

    A first, offset-free pooling pass produces features from which a small
    MLP predicts 2-channel per-bin offsets; a second pass then pools with
    those offsets.
    """

    def __init__(self, output_size, output_channels, deform_fc_channels=1024,
                 spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super().__init__(output_size, spatial_scale, sampling_ratio, gamma)

        self.output_channels = output_channels
        self.deform_fc_channels = deform_fc_channels

        bin_count = self.output_size[0] * self.output_size[1]
        # Offset head: flattened pooled features -> 2 values per output bin.
        self.offset_fc = nn.Sequential(
            nn.Linear(bin_count * self.output_channels,
                      self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, bin_count * 2))
        # Zero-init the final Linear so training starts with zero offsets,
        # i.e. plain (non-deformed) RoI pooling.
        self.offset_fc[-1].weight.data.zero_()
        self.offset_fc[-1].bias.data.zero_()

    def forward(self, input, rois):
        """Pool ``input`` over ``rois`` with self-predicted offsets."""
        assert input.size(1) == self.output_channels
        # First pass without offsets to get features for offset prediction.
        feats = deform_roi_pool(input, rois, None, self.output_size,
                                self.spatial_scale, self.sampling_ratio,
                                self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(feats.view(rois_num, -1))
        offset = offset.view(rois_num, 2, self.output_size[0],
                             self.output_size[1])
        # Second pass with the predicted offsets.
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio,
                               self.gamma)
class ModulatedDeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with learned offsets and a modulation mask.

    A first, offset-free pooling pass extracts features from which two small
    MLP heads predict (a) 2-channel per-bin offsets and (b) a per-bin mask in
    (0, 1) that scales the re-pooled output.
    """

    def __init__(self,
                 output_size,
                 output_channels,
                 deform_fc_channels=1024,
                 spatial_scale=1.0,
                 sampling_ratio=0,
                 gamma=0.1):
        # Pooling hyper-parameters are handled by the DeformRoIPool base.
        super(ModulatedDeformRoIPoolPack,
              self).__init__(output_size, spatial_scale, sampling_ratio,
                             gamma)

        # Channel count the pooled features must have (checked in forward).
        self.output_channels = output_channels
        # Hidden width of the offset/mask MLP heads.
        self.deform_fc_channels = deform_fc_channels

        # Offset head: flattened pooled features -> 2 values per output bin.
        self.offset_fc = nn.Sequential(
            nn.Linear(((self.output_size[0] * self.output_size[1]) *
                       self.output_channels), self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels,
                      ((self.output_size[0] * self.output_size[1]) * 2)))
        # Zero-init the final Linear so training starts with zero offsets,
        # i.e. plain (non-deformed) RoI pooling.
        self.offset_fc[(-1)].weight.data.zero_()
        self.offset_fc[(-1)].bias.data.zero_()

        # Mask head: flattened pooled features -> one value per bin, squashed
        # to (0, 1) by the final Sigmoid.
        self.mask_fc = nn.Sequential(
            nn.Linear(((self.output_size[0] * self.output_size[1]) *
                       self.output_channels), self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels,
                      ((self.output_size[0] * self.output_size[1]) * 1)),
            nn.Sigmoid())
        # Index 2 is the last Linear (the Sigmoid at index 3 is
        # parameter-free); zero init makes the initial mask a constant
        # sigmoid(0) = 0.5 everywhere.
        self.mask_fc[2].weight.data.zero_()
        self.mask_fc[2].bias.data.zero_()

    def forward(self, input, rois):
        """Pool ``input`` over ``rois`` with predicted offsets and mask.

        Args:
            input (Tensor): Feature map whose channel dim (size(1)) equals
                ``self.output_channels``.
            rois (Tensor): RoIs of shape (num_rois, 5).

        Returns:
            Tensor: Mask-modulated pooled features of shape
            (num_rois, C, output_size[0], output_size[1]).
        """
        assert (input.size(1) == self.output_channels)
        # First pass without offsets to obtain features for the two heads.
        x = deform_roi_pool(input, rois, None, self.output_size,
                            self.spatial_scale, self.sampling_ratio,
                            self.gamma)

        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, (-1)))
        offset = offset.view(rois_num, 2, self.output_size[0],
                             self.output_size[1])
        mask = self.mask_fc(x.view(rois_num, (-1)))
        mask = mask.view(rois_num, 1, self.output_size[0],
                         self.output_size[1])
        # Second pass with the predicted offsets, then modulate per bin.
        d = deform_roi_pool(input, rois, offset, self.output_size,
                            self.spatial_scale, self.sampling_ratio,
                            self.gamma)
        return (d * mask)
class Conv2d_deprecated(Conv2d):
    """Deprecated alias kept so old ``mmcv.ops`` imports keep working."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # stacklevel=2 attributes the warning to the caller's import/usage
        # site instead of this wrapper's __init__.
        warnings.warn(
            'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning,
            stacklevel=2)
class ConvTranspose2d_deprecated(ConvTranspose2d):
    """Deprecated alias kept so old ``mmcv.ops`` imports keep working."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # stacklevel=2 attributes the warning to the caller's import/usage
        # site instead of this wrapper's __init__.
        warnings.warn(
            'Importing ConvTranspose2d wrapper from "mmcv.ops" will be '
            'deprecated in the future. Please import them from "mmcv.cnn" '
            'instead',
            DeprecationWarning,
            stacklevel=2)
class MaxPool2d_deprecated(MaxPool2d):
    """Deprecated alias kept so old ``mmcv.ops`` imports keep working."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # stacklevel=2 attributes the warning to the caller's import/usage
        # site instead of this wrapper's __init__.
        warnings.warn(
            'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated'
            ' in the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning,
            stacklevel=2)
class Linear_deprecated(Linear):
    """Deprecated alias kept so old ``mmcv.ops`` imports keep working."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # stacklevel=2 attributes the warning to the caller's import/usage
        # site instead of this wrapper's __init__.
        warnings.warn(
            'Importing Linear wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead',
            DeprecationWarning,
            stacklevel=2)
class SigmoidFocalLossFunction(Function):
    """Autograd function wrapping the ``sigmoid_focal_loss`` extension ops."""

    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export node; attribute suffixes encode types (``_f`` float,
        # ``_s`` string).
        return g.op(
            'mmcv::MMCVSigmoidFocalLoss',
            input,
            target,
            gamma_f=gamma,
            alpha_f=alpha,
            weight_f=weight,
            reduction_s=reduction)

    @staticmethod
    def forward(ctx,
                input,
                target,
                gamma=2.0,
                alpha=0.25,
                weight=None,
                reduction='mean'):
        """Compute the sigmoid focal loss.

        Args:
            input (Tensor): 2-D predictions of shape (N, C).
            target (LongTensor): 1-D class indices of shape (N,).
            gamma (float): Focusing parameter. Default: 2.0.
            alpha (float): Balancing parameter. Default: 0.25.
            weight (Tensor, optional): 1-D per-class weights of shape (C,).
                Default: None.
            reduction (str): One of 'none', 'mean' or 'sum'. Default: 'mean'.

        Returns:
            Tensor: The loss; a scalar for 'mean'/'sum', shape (N, C) for
            'none'.
        """
        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert (input.dim() == 2)
        assert (target.dim() == 1)
        assert (input.size(0) == target.size(0))
        if (weight is None):
            # An empty tensor signals "no per-class weighting" to the
            # extension op.
            weight = input.new_empty(0)
        else:
            assert (weight.dim() == 1)
            assert (input.size(1) == weight.size(0))
        # Map the reduction name to the integer code used below.
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert (reduction in ctx.reduction_dict.keys())
        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]

        # The extension writes the element-wise loss into ``output``.
        output = input.new_zeros(input.size())

        ext_module.sigmoid_focal_loss_forward(
            input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
        if (ctx.reduction == ctx.reduction_dict['mean']):
            output = (output.sum() / input.size(0))
        elif (ctx.reduction == ctx.reduction_dict['sum']):
            output = output.sum()
        ctx.save_for_backward(input, target, weight)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, target, weight) = ctx.saved_tensors

        grad_input = input.new_zeros(input.size())

        ext_module.sigmoid_focal_loss_backward(
            input, target, weight, grad_input, gamma=ctx.gamma,
            alpha=ctx.alpha)

        grad_input *= grad_output
        if (ctx.reduction == ctx.reduction_dict['mean']):
            # Match the 1/N scaling applied in forward for 'mean'.
            grad_input /= input.size(0)
        # Gradients flow to ``input`` only; the other five inputs are
        # non-differentiable.
        return (grad_input, None, None, None, None, None)
class SigmoidFocalLoss(nn.Module):
    """Module wrapper around ``sigmoid_focal_loss``."""

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Register as a buffer so it follows .to()/.cuda() and the state
        # dict.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        """Compute the loss with the configured hyper-parameters."""
        return sigmoid_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(gamma={self.gamma}, '
                f'alpha={self.alpha}, '
                f'reduction={self.reduction})')
class SoftmaxFocalLossFunction(Function):
    """Autograd function wrapping the ``softmax_focal_loss`` extension ops.

    The softmax itself is computed here in Python; the extension consumes
    the softmax output, and the backward op receives that softmax rather
    than the raw input.
    """

    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export node; attribute suffixes encode types (``_f`` float,
        # ``_s`` string).
        return g.op(
            'mmcv::MMCVSoftmaxFocalLoss',
            input,
            target,
            gamma_f=gamma,
            alpha_f=alpha,
            weight_f=weight,
            reduction_s=reduction)

    @staticmethod
    def forward(ctx,
                input,
                target,
                gamma=2.0,
                alpha=0.25,
                weight=None,
                reduction='mean'):
        """Compute the softmax focal loss.

        Args:
            input (Tensor): 2-D predictions of shape (N, C).
            target (LongTensor): 1-D class indices of shape (N,).
            gamma (float): Focusing parameter. Default: 2.0.
            alpha (float): Balancing parameter. Default: 0.25.
            weight (Tensor, optional): 1-D per-class weights of shape (C,).
                Default: None.
            reduction (str): One of 'none', 'mean' or 'sum'. Default: 'mean'.

        Returns:
            Tensor: The loss; a scalar for 'mean'/'sum', shape (N,) for
            'none'.
        """
        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert (input.dim() == 2)
        assert (target.dim() == 1)
        assert (input.size(0) == target.size(0))
        if (weight is None):
            # An empty tensor signals "no per-class weighting" to the
            # extension op.
            weight = input.new_empty(0)
        else:
            assert (weight.dim() == 1)
            assert (input.size(1) == weight.size(0))
        # Map the reduction name to the integer code used below.
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert (reduction in ctx.reduction_dict.keys())
        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]

        # Numerically stabilised softmax: subtract the row-wise max before
        # exponentiation, then normalise by the row sums (in place).
        (channel_stats, _) = torch.max(input, dim=1)
        input_softmax = (input - channel_stats.unsqueeze(1).expand_as(input))
        input_softmax.exp_()

        channel_stats = input_softmax.sum(dim=1)
        input_softmax /= channel_stats.unsqueeze(1).expand_as(input)

        # Per-sample losses are written into ``output`` by the extension.
        output = input.new_zeros(input.size(0))
        ext_module.softmax_focal_loss_forward(
            input_softmax, target, weight, output, gamma=ctx.gamma,
            alpha=ctx.alpha)

        if (ctx.reduction == ctx.reduction_dict['mean']):
            output = (output.sum() / input.size(0))
        elif (ctx.reduction == ctx.reduction_dict['sum']):
            output = output.sum()
        # Save the softmax (not the raw input) for the backward op.
        ctx.save_for_backward(input_softmax, target, weight)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (input_softmax, target, weight) = ctx.saved_tensors
        # ``buff`` is per-sample scratch space required by the extension.
        buff = input_softmax.new_zeros(input_softmax.size(0))
        grad_input = input_softmax.new_zeros(input_softmax.size())

        ext_module.softmax_focal_loss_backward(
            input_softmax, target, weight, buff, grad_input, gamma=ctx.gamma,
            alpha=ctx.alpha)

        grad_input *= grad_output
        if (ctx.reduction == ctx.reduction_dict['mean']):
            # Match the 1/N scaling applied in forward for 'mean'.
            grad_input /= input_softmax.size(0)
        # Gradients flow to ``input`` only; the other five inputs are
        # non-differentiable.
        return (grad_input, None, None, None, None, None)
class SoftmaxFocalLoss(nn.Module):
    """Module wrapper around ``softmax_focal_loss``."""

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Register as a buffer so it follows .to()/.cuda() and the state
        # dict.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        """Compute the loss with the configured hyper-parameters."""
        return softmax_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(gamma={self.gamma}, '
                f'alpha={self.alpha}, '
                f'reduction={self.reduction})')