code
stringlengths
17
6.64M
def obj_from_dict(info, parent=None, default_args=None):
    """Build an object from a config dict.

    The dict must carry a ``'type'`` key naming the object type, either as a
    string (e.g. ``'list'``) or as the type itself (e.g. ``list``). All other
    keys become constructor arguments.

    Args:
        info (dict): Object type and construction arguments.
        parent (:class:`module`): Module which may contain the expected
            object classes.
        default_args (dict, optional): Fallback arguments applied when the
            dict does not already provide them.

    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and 'type' in info
    assert default_args is None or isinstance(default_args, dict)
    kwargs = info.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve the string either against the given module or sys.modules.
        obj_type = (getattr(parent, obj_type)
                    if parent is not None else sys.modules[obj_type])
    elif not isinstance(obj_type, type):
        raise TypeError(
            f'type must be a str or valid type, but got {type(obj_type)}')
    # Fill in defaults without overriding explicitly supplied arguments.
    for name, value in (default_args or {}).items():
        kwargs.setdefault(name, value)
    return obj_type(**kwargs)
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
    """Seed every relevant RNG: python, numpy, and torch (CPU and CUDA).

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
        use_rank_shift (bool): Whether to add rank number to the random seed
            to have different random seed in different threads.
            Default: False.
    """
    if use_rank_shift:
        # Shift by the distributed rank so each process gets its own stream.
        rank, _ = mmcv.runner.get_dist_info()
        seed += rank
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def is_tensorrt_available():
    """Return True if the ``tensorrt`` package can be imported."""
    try:
        import tensorrt
    except ModuleNotFoundError:
        return False
    else:
        # Only probing availability; drop the local binding again.
        del tensorrt
        return True
def get_tensorrt_op_path():
    """Get TensorRT plugins library path.

    Returns:
        str: Path of the first matching ``_ext_trt.*.so`` next to the parent
        package, or an empty string if none is found.
    """
    # ANSI styling: bold red on white, second half in blue.
    warnings.warn(''.join([
        '\x1b[107m', '\x1b[1m', '\x1b[31m',
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',
    ]))
    pkg_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    candidates = glob.glob(os.path.join(pkg_root, '_ext_trt.*.so'))
    return candidates[0] if candidates else ''
def is_tensorrt_plugin_loaded():
    """Check if TensorRT plugins library is loaded or not.

    Returns:
        bool: plugin_is_loaded flag
    """
    # ANSI styling: bold red on white, second half in blue.
    warnings.warn(''.join([
        '\x1b[107m', '\x1b[1m', '\x1b[31m',
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',
    ]))
    global plugin_is_loaded
    return plugin_is_loaded
def load_tensorrt_plugin():
    """load TensorRT plugins library."""
    # ANSI styling: bold red on white, second half in blue.
    warnings.warn(''.join([
        '\x1b[107m', '\x1b[1m', '\x1b[31m',
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',
    ]))
    global plugin_is_loaded
    path = get_tensorrt_op_path()
    # Load at most once, and only when the shared object actually exists.
    if not plugin_is_loaded and os.path.exists(path):
        ctypes.CDLL(path)
        plugin_is_loaded = True
def preprocess_onnx(onnx_model):
    """Modify onnx model to match with TensorRT plugins in mmcv.

    There are some conflict between onnx node definition and TensorRT limit.
    This function perform preprocess on the onnx model to solve the conflicts.
    For example, onnx `attribute` is loaded in TensorRT on host and onnx
    `input` is loaded on device. The shape inference is performed on host, so
    any `input` related to shape (such as `max_output_boxes_per_class` in
    NonMaxSuppression) should be transformed to `attribute` before conversion.

    Arguments:
        onnx_model (onnx.ModelProto): Input onnx model.

    Returns:
        onnx.ModelProto: Modified onnx model.
    """
    # Deprecation banner: bold red on white background, second half in blue.
    (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
    (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    graph = onnx_model.graph
    nodes = graph.node
    initializers = graph.initializer
    # Map each (non-empty) output tensor name to the node producing it.
    node_dict = {}
    for node in nodes:
        node_outputs = node.output
        for output in node_outputs:
            if (len(output) > 0):
                node_dict[output] = node
    init_dict = {_.name: _ for _ in initializers}
    nodes_name_to_remove = set()

    def is_node_without_output(name):
        # True if no surviving node consumes tensor `name`.
        for (node_name, node) in node_dict.items():
            if (node_name not in nodes_name_to_remove):
                if (name in node.input):
                    return False
        return True

    def mark_nodes_to_remove(name):
        # Mark the producer of `name` for removal, then recursively mark any
        # of its inputs whose producers become dead as a consequence.
        node = node_dict[name]
        nodes_name_to_remove.add(name)
        for input_node_name in node.input:
            if is_node_without_output(input_node_name):
                mark_nodes_to_remove(input_node_name)

    def parse_data(name, typ, default_value=0):
        # Read a scalar value for tensor `name` from either a Constant node
        # or an initializer; fall back to `default_value` (and prune the
        # producing subgraph) when the producer is not a Constant.
        if (name in node_dict):
            node = node_dict[name]
            if (node.op_type == 'Constant'):
                raw_data = node.attribute[0].t.raw_data
            else:
                mark_nodes_to_remove(name)
                return default_value
        elif (name in init_dict):
            raw_data = init_dict[name].raw_data
        else:
            raise ValueError(f'{name} not found in node or initilizer.')
        return np.frombuffer(raw_data, typ).item()

    nrof_node = len(nodes)
    for idx in range(nrof_node):
        node = nodes[idx]
        node_attributes = node.attribute
        node_inputs = node.input
        node_outputs = node.output
        node_name = node.name
        if (node.op_type == 'NonMaxSuppression'):
            # Defaults used when the optional inputs/attributes are absent.
            center_point_box = 0
            max_output_boxes_per_class = 1000000
            iou_threshold = 0.3
            score_threshold = 0.0
            offset = 0
            for attribute in node_attributes:
                if (attribute.name == 'center_point_box'):
                    center_point_box = attribute.i
                elif (attribute.name == 'offset'):
                    offset = attribute.i
            # Fold the optional scalar *inputs* (3rd..5th) into node
            # *attributes*, since TensorRT evaluates them on host.
            if (len(node_inputs) >= 3):
                max_output_boxes_per_class = parse_data(node_inputs[2], np.int64, max_output_boxes_per_class)
                mark_nodes_to_remove(node_inputs[2])
            if (len(node_inputs) >= 4):
                iou_threshold = parse_data(node_inputs[3], np.float32, iou_threshold)
                mark_nodes_to_remove(node_inputs[3])
            if (len(node_inputs) >= 5):
                score_threshold = parse_data(node_inputs[4], np.float32)
                mark_nodes_to_remove(node_inputs[4])
            # Rebuild the node with only the two tensor inputs and the folded
            # scalars as attributes.
            new_node = onnx.helper.make_node('NonMaxSuppression', node_inputs[:2], node_outputs, name=node_name, center_point_box=center_point_box, max_output_boxes_per_class=max_output_boxes_per_class, iou_threshold=iou_threshold, score_threshold=score_threshold, offset=offset)
            for output in node_outputs:
                if (output in node_dict):
                    node_dict[output] = new_node
            # Replace in place: insert at the same position, then drop the old
            # node, preserving topological order.
            nodes.insert(idx, new_node)
            nodes.remove(node)
        elif (node.op_type == 'InstanceNormalization'):
            # Redirect to the mmcv TensorRT plugin implementation.
            node.op_type = 'MMCVInstanceNormalization'
    for node_name in nodes_name_to_remove:
        nodes.remove(node_dict[node_name])
    return onnx_model
def onnx2trt(onnx_model, opt_shape_dict, log_level=trt.Logger.ERROR, fp16_mode=False, max_workspace_size=0, device_id=0):
    """Convert onnx model to tensorrt engine.

    Arguments:
        onnx_model (str or onnx.ModelProto): the onnx model to convert from
        opt_shape_dict (dict): the min/opt/max shape of each input
        log_level (TensorRT log level): the log level of TensorRT
        fp16_mode (bool): enable fp16 mode
        max_workspace_size (int): set max workspace size of TensorRT engine.
            some tactic and layers need large workspace.
        device_id (int): choice the device to create engine.

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine created from onnx_model

    Example:
        >>> engine = onnx2trt(
        >>>             "onnx_model.onnx",
        >>>             {'input': [[1, 3, 160, 160],
        >>>                        [1, 3, 320, 320],
        >>>                        [1, 3, 640, 640]]},
        >>>             log_level=trt.Logger.WARNING,
        >>>             fp16_mode=True,
        >>>             max_workspace_size=1 << 30,
        >>>             device_id=0)
        >>>             })
    """
    # Deprecation banner: bold red on white background, second half in blue.
    (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
    (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    device = torch.device('cuda:{}'.format(device_id))
    logger = trt.Logger(log_level)
    builder = trt.Builder(logger)
    # Explicit-batch network definition is required for ONNX parsing.
    EXPLICIT_BATCH = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, logger)
    if isinstance(onnx_model, str):
        onnx_model = onnx.load(onnx_model)
    # Fold NMS scalar inputs into attributes etc. before parsing.
    onnx_model = preprocess_onnx(onnx_model)
    if (not parser.parse(onnx_model.SerializeToString())):
        error_msgs = ''
        for error in range(parser.num_errors):
            error_msgs += f'''{parser.get_error(error)}
'''
        raise RuntimeError(f'''parse onnx failed:
{error_msgs}''')
    # NOTE(review): set on both builder and config — the builder attribute is
    # the legacy (pre-TRT8) API kept for compatibility.
    builder.max_workspace_size = max_workspace_size
    config = builder.create_builder_config()
    config.max_workspace_size = max_workspace_size
    profile = builder.create_optimization_profile()
    for (input_name, param) in opt_shape_dict.items():
        # param holds [min_shape, opt_shape, max_shape] for this input.
        min_shape = tuple(param[0][:])
        opt_shape = tuple(param[1][:])
        max_shape = tuple(param[2][:])
        profile.set_shape(input_name, min_shape, opt_shape, max_shape)
    config.add_optimization_profile(profile)
    if fp16_mode:
        # Legacy flag plus the config flag, mirroring the workspace handling.
        builder.fp16_mode = fp16_mode
        config.set_flag(trt.BuilderFlag.FP16)
    with torch.cuda.device(device):
        engine = builder.build_engine(network, config)
    return engine
def save_trt_engine(engine, path):
    """Serialize TensorRT engine to disk.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to serialize
        path (str): disk path to write the engine
    """
    # ANSI styling: bold red on white, second half in blue.
    warnings.warn(''.join([
        '\x1b[107m', '\x1b[1m', '\x1b[31m',
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',
    ]))
    serialized = bytearray(engine.serialize())
    with open(path, mode='wb') as f:
        f.write(serialized)
def load_trt_engine(path):
    """Deserialize TensorRT engine from disk.

    Arguments:
        path (str): disk path to read the engine

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine loaded from disk
    """
    # ANSI styling: bold red on white, second half in blue.
    warnings.warn(''.join([
        '\x1b[107m', '\x1b[1m', '\x1b[31m',
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',
    ]))
    with trt.Logger() as logger, trt.Runtime(logger) as runtime:
        with open(path, mode='rb') as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        return engine
def torch_dtype_from_trt(dtype):
    """Map a TensorRT dtype to the corresponding torch dtype.

    Raises:
        TypeError: if ``dtype`` has no torch equivalent.
    """
    dtype_pairs = (
        (trt.bool, torch.bool),
        (trt.int8, torch.int8),
        (trt.int32, torch.int32),
        (trt.float16, torch.float16),
        (trt.float32, torch.float32),
    )
    for trt_dtype, torch_dtype in dtype_pairs:
        if dtype == trt_dtype:
            return torch_dtype
    raise TypeError('%s is not supported by torch' % dtype)
def torch_device_from_trt(device):
    """Map a TensorRT tensor location to the corresponding torch device.

    Args:
        device (trt.TensorLocation): location of a TensorRT binding.

    Returns:
        torch.device: ``cuda`` for ``DEVICE``, ``cpu`` for ``HOST``.

    Raises:
        TypeError: if ``device`` is neither ``DEVICE`` nor ``HOST``.
    """
    if device == trt.TensorLocation.DEVICE:
        return torch.device('cuda')
    elif device == trt.TensorLocation.HOST:
        return torch.device('cpu')
    else:
        # Bug fix: the TypeError was previously *returned* instead of raised,
        # silently handing callers an exception instance as the device.
        raise TypeError('%s is not supported by torch' % device)
class TRTWrapper(torch.nn.Module):
    """TensorRT engine Wrapper.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to wrap
        input_names (list[str]): names of each inputs
        output_names (list[str]): names of each outputs

    Note:
        If the engine is converted from onnx model. The input_names and
        output_names should be the same as onnx model.
    """

    def __init__(self, engine, input_names=None, output_names=None):
        # Deprecation banner: bold red on white background, second half blue.
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += 'DeprecationWarning: This tool will be deprecated in future. '
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        super(TRTWrapper, self).__init__()
        self.engine = engine
        if isinstance(self.engine, str):
            # A file path was given; deserialize the engine from disk.
            self.engine = load_trt_engine(engine)
        if (not isinstance(self.engine, trt.ICudaEngine)):
            raise TypeError('engine should be str or trt.ICudaEngine')
        # Serialize the engine into state_dict on save (see _on_state_dict).
        self._register_state_dict_hook(TRTWrapper._on_state_dict)
        self.context = self.engine.create_execution_context()
        if ((input_names is None) or (output_names is None)):
            # Derive binding names from the engine: inputs are the bindings
            # flagged as such, outputs are the remainder.
            names = [_ for _ in self.engine]
            input_names = list(filter(self.engine.binding_is_input, names))
            output_names = list((set(names) - set(input_names)))
        self.input_names = input_names
        self.output_names = output_names

    def _on_state_dict(self, state_dict, prefix, local_metadata):
        # Persist the serialized engine and binding names so the module can be
        # restored via _load_from_state_dict.
        state_dict[(prefix + 'engine')] = bytearray(self.engine.serialize())
        state_dict[(prefix + 'input_names')] = self.input_names
        state_dict[(prefix + 'output_names')] = self.output_names

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Rebuild engine and execution context from serialized bytes.
        engine_bytes = state_dict[(prefix + 'engine')]
        with trt.Logger() as logger, trt.Runtime(logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(engine_bytes)
            self.context = self.engine.create_execution_context()
        self.input_names = state_dict[(prefix + 'input_names')]
        self.output_names = state_dict[(prefix + 'output_names')]

    def forward(self, inputs):
        """
        Arguments:
            inputs (dict): dict of input name-tensors pair

        Return:
            dict: dict of output name-tensors pair
        """
        assert (self.input_names is not None)
        assert (self.output_names is not None)
        # One binding slot (device pointer) per engine binding.
        bindings = ([None] * (len(self.input_names) + len(self.output_names)))
        for (input_name, input_tensor) in inputs.items():
            idx = self.engine.get_binding_index(input_name)
            if (input_tensor.dtype == torch.long):
                # TensorRT has no int64 binding here; downcast to int32.
                input_tensor = input_tensor.int()
            self.context.set_binding_shape(idx, tuple(input_tensor.shape))
            bindings[idx] = input_tensor.contiguous().data_ptr()
        # Allocate output tensors matching the engine's dtype/shape/location
        # and hand their pointers to the execution context.
        outputs = {}
        for (i, output_name) in enumerate(self.output_names):
            idx = self.engine.get_binding_index(output_name)
            dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
            shape = tuple(self.context.get_binding_shape(idx))
            device = torch_device_from_trt(self.engine.get_location(idx))
            output = torch.empty(size=shape, dtype=dtype, device=device)
            outputs[output_name] = output
            bindings[idx] = output.data_ptr()
        # Enqueue on the current CUDA stream; outputs are valid once the
        # stream is synchronized by the caller (or by a later torch op).
        self.context.execute_async_v2(bindings, torch.cuda.current_stream().cuda_stream)
        return outputs
class TRTWraper(TRTWrapper):
    """Deprecated, misspelled alias of :class:`TRTWrapper`.

    Kept only for backward compatibility; emits a ``DeprecationWarning`` on
    construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'TRTWraper will be deprecated in future. '
            'Please use TRTWrapper instead', DeprecationWarning)
class ConfigDict(Dict):
    """``Dict`` subclass that raises on missing keys instead of creating them.

    Item access on a missing key raises ``KeyError``; attribute access raises
    ``AttributeError``, mirroring the behaviour of regular objects.
    """

    def __missing__(self, name):
        # Disable addict's implicit creation of missing entries.
        raise KeyError(name)

    def __getattr__(self, name):
        err = None
        try:
            value = super(ConfigDict, self).__getattr__(name)
        except KeyError:
            err = AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
        except Exception as e:
            err = e
        if err is None:
            return value
        # Raised outside the except block on purpose, so the original
        # KeyError is not attached as exception context.
        raise err
def add_args(parser, cfg, prefix=''):
    """Recursively register config entries as argparse options.

    Each key in ``cfg`` becomes a ``--<prefix><key>`` option whose type is
    inferred from the sample value. Nested dicts recurse with a dotted
    prefix; iterables become ``nargs='+'`` options typed after their first
    element.

    Args:
        parser (ArgumentParser): Parser to add arguments to.
        cfg (dict): Config dict supplying option names and sample values.
        prefix (str): Dotted prefix for nested options. Default: ''.

    Returns:
        ArgumentParser: The same parser, for chaining.
    """
    for k, v in cfg.items():
        option = '--' + prefix + k
        if isinstance(v, str):
            parser.add_argument(option)
        elif isinstance(v, bool):
            # Bug fix: bool must be tested before int — bool is a subclass of
            # int, so the previous int-first ordering made this branch
            # unreachable and registered flags as int-typed options.
            parser.add_argument(option, action='store_true')
        elif isinstance(v, int):
            parser.add_argument(option, type=int)
        elif isinstance(v, float):
            parser.add_argument(option, type=float)
        elif isinstance(v, dict):
            add_args(parser, v, prefix + k + '.')
        elif isinstance(v, abc.Iterable):
            # NOTE(review): assumes an indexable iterable — v[0] fails on
            # sets/generators; unchanged from the original contract.
            parser.add_argument(option, type=type(v[0]), nargs='+')
        else:
            print(f'cannot parse key {prefix + k} of type {type(v)}')
    return parser
class Config():
    """A facility for config and config files.

    It supports common file formats as configs: python/json/yaml. The interface
    is the same as a dict object and also allows access config values as
    attributes.

    Example:
        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
        >>> cfg.a
        1
        >>> cfg.b
        {'b1': [0, 1]}
        >>> cfg.b.b1
        [0, 1]
        >>> cfg = Config.fromfile('tests/data/config/a.py')
        >>> cfg.filename
        "/home/kchen/projects/mmcv/tests/data/config/a.py"
        >>> cfg.item4
        'test'
        >>> cfg
        "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
        "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
    """

    @staticmethod
    def _validate_py_syntax(filename):
        # Parse (without executing) to surface syntax errors early.
        with open(filename, 'r', encoding='utf-8') as f:
            content = f.read()
        try:
            ast.parse(content)
        except SyntaxError as e:
            raise SyntaxError(f'There are syntax errors in config file (unknown): {e}')

    @staticmethod
    def _substitute_predefined_vars(filename, temp_config_name):
        # Expand {{ fileDirname }} / {{ fileBasename }} / ... templates into
        # concrete values derived from the config's own path.
        file_dirname = osp.dirname(filename)
        file_basename = osp.basename(filename)
        file_basename_no_extension = osp.splitext(file_basename)[0]
        file_extname = osp.splitext(filename)[1]
        support_templates = dict(fileDirname=file_dirname, fileBasename=file_basename, fileBasenameNoExtension=file_basename_no_extension, fileExtname=file_extname)
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        for (key, value) in support_templates.items():
            regexp = (('\\{\\{\\s*' + str(key)) + '\\s*\\}\\}')
            # Normalize Windows path separators so re.sub does not interpret
            # backslashes as escapes.
            value = value.replace('\\', '/')
            config_file = re.sub(regexp, value, config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)

    @staticmethod
    def _pre_substitute_base_vars(filename, temp_config_name):
        """Substitute base variable placeholders to string, so that parsing
        would work."""
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        # Replace each {{ _base_.xxx }} placeholder with a unique random
        # string token; _substitute_base_vars later resolves the tokens.
        base_var_dict = {}
        regexp = (('\\{\\{\\s*' + BASE_KEY) + '\\.([\\w\\.]+)\\s*\\}\\}')
        base_vars = set(re.findall(regexp, config_file))
        for base_var in base_vars:
            randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'
            base_var_dict[randstr] = base_var
            regexp = (((('\\{\\{\\s*' + BASE_KEY) + '\\.') + base_var) + '\\s*\\}\\}')
            config_file = re.sub(regexp, f'"{randstr}"', config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)
        return base_var_dict

    @staticmethod
    def _substitute_base_vars(cfg, base_var_dict, base_cfg):
        """Substitute variable strings to their actual values."""
        cfg = copy.deepcopy(cfg)
        if isinstance(cfg, dict):
            for (k, v) in cfg.items():
                if (isinstance(v, str) and (v in base_var_dict)):
                    # Token found: walk the dotted path into the base config.
                    new_v = base_cfg
                    for new_k in base_var_dict[v].split('.'):
                        new_v = new_v[new_k]
                    cfg[k] = new_v
                elif isinstance(v, (list, tuple, dict)):
                    cfg[k] = Config._substitute_base_vars(v, base_var_dict, base_cfg)
        elif isinstance(cfg, tuple):
            cfg = tuple((Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg))
        elif isinstance(cfg, list):
            cfg = [Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg]
        elif (isinstance(cfg, str) and (cfg in base_var_dict)):
            new_v = base_cfg
            for new_k in base_var_dict[cfg].split('.'):
                new_v = new_v[new_k]
            cfg = new_v
        return cfg

    @staticmethod
    def _file2dict(filename, use_predefined_variables=True):
        # Load a config file (py/json/yaml) into (cfg_dict, cfg_text),
        # recursively merging any _base_ configs it references.
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        fileExtname = osp.splitext(filename)[1]
        if (fileExtname not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        with tempfile.TemporaryDirectory() as temp_config_dir:
            temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=fileExtname)
            if (platform.system() == 'Windows'):
                # Windows cannot reopen an already-open NamedTemporaryFile.
                temp_config_file.close()
            temp_config_name = osp.basename(temp_config_file.name)
            if use_predefined_variables:
                Config._substitute_predefined_vars(filename, temp_config_file.name)
            else:
                shutil.copyfile(filename, temp_config_file.name)
            base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name)
            if filename.endswith('.py'):
                # Import the temp copy as a module and harvest its globals.
                temp_module_name = osp.splitext(temp_config_name)[0]
                sys.path.insert(0, temp_config_dir)
                Config._validate_py_syntax(filename)
                mod = import_module(temp_module_name)
                sys.path.pop(0)
                cfg_dict = {name: value for (name, value) in mod.__dict__.items() if (not name.startswith('__'))}
                # Drop the module so repeated loads do not hit the cache.
                del sys.modules[temp_module_name]
            elif filename.endswith(('.yml', '.yaml', '.json')):
                import mmcv
                cfg_dict = mmcv.load(temp_config_file.name)
            temp_config_file.close()
        if (DEPRECATION_KEY in cfg_dict):
            deprecation_info = cfg_dict.pop(DEPRECATION_KEY)
            warning_msg = f'The config file (unknown) will be deprecated in the future.'
            if ('expected' in deprecation_info):
                warning_msg += f" Please use {deprecation_info['expected']} instead."
            if ('reference' in deprecation_info):
                warning_msg += f" More information can be found at {deprecation_info['reference']}"
            warnings.warn(warning_msg, DeprecationWarning)
        cfg_text = (filename + '\n')
        with open(filename, 'r', encoding='utf-8') as f:
            cfg_text += f.read()
        if (BASE_KEY in cfg_dict):
            # Recursively load every base config and merge this one on top.
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop(BASE_KEY)
            base_filename = (base_filename if isinstance(base_filename, list) else [base_filename])
            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                (_cfg_dict, _cfg_text) = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)
            base_cfg_dict = dict()
            for c in cfg_dict_list:
                duplicate_keys = (base_cfg_dict.keys() & c.keys())
                if (len(duplicate_keys) > 0):
                    raise KeyError('Duplicate key is not allowed among bases. '
                                   f'Duplicate keys: {duplicate_keys}')
                base_cfg_dict.update(c)
            # Resolve {{ _base_.xxx }} tokens now that all bases are merged.
            cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict)
            base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)
        return (cfg_dict, cfg_text)

    @staticmethod
    def _merge_a_into_b(a, b, allow_list_keys=False):
        """merge dict ``a`` into dict ``b`` (non-inplace).

        Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
        in-place modifications.

        Args:
            a (dict): The source dict to be merged into ``b``.
            b (dict): The origin dict to be fetch keys from ``a``.
            allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
                are allowed in source ``a`` and will replace the element of the
                corresponding index in b if b is a list. Default: False.

        Returns:
            dict: The modified dict of ``b`` using ``a``.

        Examples:
            # Normally merge a into b.
            >>> Config._merge_a_into_b(
            ...     dict(obj=dict(a=2)), dict(obj=dict(a=1)))
            {'obj': {'a': 2}}

            # Delete b first and merge a into b.
            >>> Config._merge_a_into_b(
            ...     dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))
            {'obj': {'a': 2}}

            # b is a list
            >>> Config._merge_a_into_b(
            ...     {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)
            [{'a': 2}, {'b': 2}]
        """
        b = b.copy()
        for (k, v) in a.items():
            if (allow_list_keys and k.isdigit() and isinstance(b, list)):
                # Digit string keys index into a base list.
                k = int(k)
                if (len(b) <= k):
                    raise KeyError(f'Index {k} exceeds the length of list {b}')
                b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
            elif isinstance(v, dict):
                if ((k in b) and (not v.pop(DELETE_KEY, False))):
                    allowed_types = ((dict, list) if allow_list_keys else dict)
                    if (not isinstance(b[k], allowed_types)):
                        raise TypeError(f'{k}={v} in child config cannot inherit from base because {k} is a dict in the child config but is of type {type(b[k])} in base config. You may set `{DELETE_KEY}=True` to ignore the base config.')
                    b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
                else:
                    # _delete_=True (or a fresh key): replace wholesale.
                    b[k] = ConfigDict(v)
            else:
                b[k] = v
        return b

    @staticmethod
    def fromfile(filename, use_predefined_variables=True, import_custom_modules=True):
        # Build a Config from a file, optionally importing any modules listed
        # under the 'custom_imports' key first (for registry side effects).
        (cfg_dict, cfg_text) = Config._file2dict(filename, use_predefined_variables)
        if (import_custom_modules and cfg_dict.get('custom_imports', None)):
            import_modules_from_strings(**cfg_dict['custom_imports'])
        return Config(cfg_dict, cfg_text=cfg_text, filename=filename)

    @staticmethod
    def fromstring(cfg_str, file_format):
        """Generate config from config str.

        Args:
            cfg_str (str): Config str.
            file_format (str): Config file format corresponding to the
                config str. Only py/yml/yaml/json type are supported now!

        Returns:
            :obj:`Config`: Config obj.
        """
        if (file_format not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        if ((file_format != '.py') and ('dict(' in cfg_str)):
            warnings.warn('Please check "file_format", the file format may be .py')
        # Round-trip through a real temp file so fromfile() can be reused.
        with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix=file_format, delete=False) as temp_file:
            temp_file.write(cfg_str)
        cfg = Config.fromfile(temp_file.name)
        os.remove(temp_file.name)
        return cfg

    @staticmethod
    def auto_argparser(description=None):
        """Generate argparser from config file automatically (experimental)"""
        # First pass only extracts the config path; the second parser is then
        # populated with one option per config entry.
        partial_parser = ArgumentParser(description=description)
        partial_parser.add_argument('config', help='config file path')
        cfg_file = partial_parser.parse_known_args()[0].config
        cfg = Config.fromfile(cfg_file)
        parser = ArgumentParser(description=description)
        parser.add_argument('config', help='config file path')
        add_args(parser, cfg)
        return (parser, cfg)

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if (cfg_dict is None):
            cfg_dict = dict()
        elif (not isinstance(cfg_dict, dict)):
            raise TypeError(f'cfg_dict must be a dict, but got {type(cfg_dict)}')
        for key in cfg_dict:
            if (key in RESERVED_KEYS):
                raise KeyError(f'{key} is reserved for config file')
        # Bypass our own __setattr__ (which writes into _cfg_dict) by going
        # through object.__setattr__ for the three internal slots.
        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
        super(Config, self).__setattr__('_filename', filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, 'r') as f:
                text = f.read()
        else:
            text = ''
        super(Config, self).__setattr__('_text', text)

    @property
    def filename(self):
        return self._filename

    @property
    def text(self):
        return self._text

    @property
    def pretty_text(self):
        # Render the config back to python-ish source text, then run yapf to
        # normalize the formatting.
        indent = 4

        def _indent(s_, num_spaces):
            # Indent every line but the first by num_spaces.
            s = s_.split('\n')
            if (len(s) == 1):
                return s_
            first = s.pop(0)
            s = [((num_spaces * ' ') + line) for line in s]
            s = '\n'.join(s)
            s = ((first + '\n') + s)
            return s

        def _format_basic_types(k, v, use_mapping=False):
            # Render a scalar entry as k=v (or 'k': v in mapping style).
            if isinstance(v, str):
                v_str = f"'{v}'"
            else:
                v_str = str(v)
            if use_mapping:
                k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                attr_str = f'{k_str}: {v_str}'
            else:
                attr_str = f'{str(k)}={v_str}'
            attr_str = _indent(attr_str, indent)
            return attr_str

        def _format_list(k, v, use_mapping=False):
            # Lists of dicts get a dict(...) per element; other lists fall
            # back to scalar rendering.
            if all((isinstance(_, dict) for _ in v)):
                v_str = '[\n'
                v_str += '\n'.join((f'dict({_indent(_format_dict(v_), indent)}),' for v_ in v)).rstrip(',')
                if use_mapping:
                    k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                    attr_str = f'{k_str}: {v_str}'
                else:
                    attr_str = f'{str(k)}={v_str}'
                attr_str = (_indent(attr_str, indent) + ']')
            else:
                attr_str = _format_basic_types(k, v, use_mapping)
            return attr_str

        def _contain_invalid_identifier(dict_str):
            # Keys that are not valid identifiers force {'k': v} syntax.
            contain_invalid_identifier = False
            for key_name in dict_str:
                contain_invalid_identifier |= (not str(key_name).isidentifier())
            return contain_invalid_identifier

        def _format_dict(input_dict, outest_level=False):
            r = ''
            s = []
            use_mapping = _contain_invalid_identifier(input_dict)
            if use_mapping:
                r += '{'
            for (idx, (k, v)) in enumerate(input_dict.items()):
                is_last = (idx >= (len(input_dict) - 1))
                end = ('' if (outest_level or is_last) else ',')
                if isinstance(v, dict):
                    v_str = ('\n' + _format_dict(v))
                    if use_mapping:
                        k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                        attr_str = f'{k_str}: dict({v_str}'
                    else:
                        attr_str = f'{str(k)}=dict({v_str}'
                    attr_str = ((_indent(attr_str, indent) + ')') + end)
                elif isinstance(v, list):
                    attr_str = (_format_list(k, v, use_mapping) + end)
                else:
                    attr_str = (_format_basic_types(k, v, use_mapping) + end)
                s.append(attr_str)
            r += '\n'.join(s)
            if use_mapping:
                r += '}'
            return r

        cfg_dict = self._cfg_dict.to_dict()
        text = _format_dict(cfg_dict, outest_level=True)
        yapf_style = dict(based_on_style='pep8', blank_line_before_nested_class_or_def=True, split_before_expression_after_opening_paren=True)
        (text, _) = FormatCode(text, style_config=yapf_style, verify=True)
        return text

    def __repr__(self):
        return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        # Delegate attribute access to the underlying ConfigDict.
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def __getstate__(self):
        # Pickle support: the three internal slots fully describe a Config.
        return (self._cfg_dict, self._filename, self._text)

    def __copy__(self):
        cls = self.__class__
        other = cls.__new__(cls)
        other.__dict__.update(self.__dict__)
        return other

    def __deepcopy__(self, memo):
        cls = self.__class__
        other = cls.__new__(cls)
        memo[id(self)] = other
        for (key, value) in self.__dict__.items():
            # Use object.__setattr__ to avoid routing through _cfg_dict.
            super(Config, other).__setattr__(key, copy.deepcopy(value, memo))
        return other

    def __setstate__(self, state):
        (_cfg_dict, _filename, _text) = state
        super(Config, self).__setattr__('_cfg_dict', _cfg_dict)
        super(Config, self).__setattr__('_filename', _filename)
        super(Config, self).__setattr__('_text', _text)

    def dump(self, file=None):
        # Dump to python source (for .py configs) or via mmcv serializers
        # (json/yaml); return the text when no target file is given.
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()
        if self.filename.endswith('.py'):
            if (file is None):
                return self.pretty_text
            else:
                with open(file, 'w', encoding='utf-8') as f:
                    f.write(self.pretty_text)
        else:
            import mmcv
            if (file is None):
                file_format = self.filename.split('.')[(- 1)]
                return mmcv.dump(cfg_dict, file_format=file_format)
            else:
                mmcv.dump(cfg_dict, file)

    def merge_from_dict(self, options, allow_list_keys=True):
        """Merge list into cfg_dict.

        Merge the dict parsed by MultipleKVAction into this cfg.

        Examples:
            >>> options = {'model.backbone.depth': 50,
            ...            'model.backbone.with_cp':True}
            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
            >>> cfg.merge_from_dict(options)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(
            ...     model=dict(backbone=dict(depth=50, with_cp=True)))

            >>> # Merge list element
            >>> cfg = Config(dict(pipeline=[
            ...     dict(type='LoadImage'), dict(type='LoadAnnotations')]))
            >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
            >>> cfg.merge_from_dict(options, allow_list_keys=True)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(pipeline=[
            ...     dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])

        Args:
            options (dict): dict of configs to merge from.
            allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
                are allowed in ``options`` and will replace the element of the
                corresponding index in the config if the config is a list.
                Default: True.
        """
        # Expand dotted keys ('a.b.c') into nested ConfigDicts, then merge.
        option_cfg_dict = {}
        for (full_key, v) in options.items():
            d = option_cfg_dict
            key_list = full_key.split('.')
            for subkey in key_list[:(- 1)]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[(- 1)]
            d[subkey] = v
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        super(Config, self).__setattr__('_cfg_dict', Config._merge_a_into_b(option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))
class DictAction(Action):
    """argparse action that collects ``KEY=VALUE`` pairs into a dict.

    Each value is split from its key on the first ``=``. List options can be
    passed as comma separated values, i.e 'KEY=V1,V2,V3', or with explicit
    brackets, i.e. 'KEY=[V1,V2,V3]'. Nested brackets build list/tuple
    values, e.g. 'KEY=[(V1,V2),(V3,V4)]'.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        """Best-effort conversion of ``val`` to int, float, bool or str."""
        for caster in (int, float):
            try:
                return caster(val)
            except ValueError:
                pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        return val

    @staticmethod
    def _parse_iterable(val):
        """Parse iterable values in the string.

        All elements inside '()' or '[]' are treated as iterable values.

        Args:
            val (str): Value string.

        Returns:
            list | tuple: The expanded list or tuple from the string.

        Examples:
            >>> DictAction._parse_iterable('1,2,3')
            [1, 2, 3]
            >>> DictAction._parse_iterable('[a, b, c]')
            ['a', 'b', 'c']
            >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')
            [(1, 2, 3), ['a', 'b'], 'c']
        """

        def find_next_comma(string):
            # Index of the next top-level ',' in ``string``; commas nested
            # inside '()' or '[]' are skipped. Returns len(string) when no
            # top-level comma exists.
            assert (string.count('(') == string.count(')')) and (
                string.count('[') == string.count(']')), \
                f'Imbalanced brackets exist in {string}'
            for idx, char in enumerate(string):
                if char != ',':
                    continue
                prefix = string[:idx]
                if (prefix.count('(') == prefix.count(')')
                        and prefix.count('[') == prefix.count(']')):
                    return idx
            return len(string)

        # Strip surrounding quotes and all blanks before parsing.
        val = val.strip('\'"').replace(' ', '')
        is_tuple = False
        if val.startswith('(') and val.endswith(')'):
            is_tuple = True
            val = val[1:-1]
        elif val.startswith('[') and val.endswith(']'):
            val = val[1:-1]
        elif ',' not in val:
            # A bare scalar: convert and return it directly.
            return DictAction._parse_int_float_bool(val)

        values = []
        while val:
            comma_idx = find_next_comma(val)
            values.append(DictAction._parse_iterable(val[:comma_idx]))
            val = val[comma_idx + 1:]
        return tuple(values) if is_tuple else values

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for pair in values:
            key, val = pair.split('=', maxsplit=1)
            options[key] = self._parse_iterable(val)
        setattr(namespace, self.dest, options)
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: The environment information. The following fields are
        contained: ``sys.platform``, ``Python``, ``CUDA available``,
        GPU device names, ``CUDA_HOME``/``NVCC`` (only when CUDA is
        available), ``GCC`` ("n/a" if GCC is not installed), ``PyTorch``
        (+ compiling details), ``TorchVision`` (optional), ``OpenCV``,
        ``MMCV`` and the MMCV / MMCV CUDA compiler versions.
    """
    env_info = {
        'sys.platform': sys.platform,
        'Python': sys.version.replace('\n', ''),
    }

    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        # Group device ids sharing the same name, e.g. 'GPU 0,1': 'V100'.
        devices = defaultdict(list)
        for gpu_id in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(gpu_id)].append(str(gpu_id))
        for device_name, device_ids in devices.items():
            env_info['GPU ' + ','.join(device_ids)] = device_name

        from mmcv.utils.parrots_wrapper import _get_cuda_home
        CUDA_HOME = _get_cuda_home()
        env_info['CUDA_HOME'] = CUDA_HOME
        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
            try:
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output(
                    f'"{nvcc}" -V | tail -n1', shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc

    try:
        gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
        env_info['GCC'] = gcc.decode('utf-8').strip()
    except subprocess.CalledProcessError:
        # GCC is not installed on this machine.
        env_info['GCC'] = 'n/a'

    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = get_build_config()

    try:
        import torchvision
        env_info['TorchVision'] = torchvision.__version__
    except ModuleNotFoundError:
        pass

    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__

    try:
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
    except ModuleNotFoundError:
        # The compiled ops are unavailable (CPU-only / source install).
        env_info['MMCV Compiler'] = 'n/a'
        env_info['MMCV CUDA Compiler'] = 'n/a'
    else:
        env_info['MMCV Compiler'] = get_compiler_version()
        env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
def check_ops_exist(): ext_loader = pkgutil.find_loader('mmcv._ext') return (ext_loader is not None)
def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): 'Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified and the process rank is 0, a FileHandler\n will also be added.\n\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level. Note that only the process of\n rank 0 is affected, and other processes will set the level to\n "Error" thus be silent most of the time.\n file_mode (str): The file mode used in opening log file.\n Defaults to \'w\'.\n\n Returns:\n logging.Logger: The expected logger.\n ' logger = logging.getLogger(name) if (name in logger_initialized): return logger for logger_name in logger_initialized: if name.startswith(logger_name): return logger for handler in logger.root.handlers: if (type(handler) is logging.StreamHandler): handler.setLevel(logging.ERROR) stream_handler = logging.StreamHandler() handlers = [stream_handler] if (dist.is_available() and dist.is_initialized()): rank = dist.get_rank() else: rank = 0 if ((rank == 0) and (log_file is not None)): file_handler = logging.FileHandler(log_file, file_mode) handlers.append(file_handler) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') for handler in handlers: handler.setFormatter(formatter) handler.setLevel(log_level) logger.addHandler(handler) if (rank == 0): logger.setLevel(log_level) else: logger.setLevel(logging.ERROR) logger_initialized[name] = True return logger
def print_log(msg, logger=None, level=logging.INFO): 'Print a log message.\n\n Args:\n msg (str): The message to be logged.\n logger (logging.Logger | str | None): The logger to be used.\n Some special loggers are:\n - "silent": no message will be printed.\n - other str: the logger obtained with `get_root_logger(logger)`.\n - None: The `print()` method will be used to print log messages.\n level (int): Logging level. Only available when `logger` is a Logger\n object or "root".\n ' if (logger is None): print(msg) elif isinstance(logger, logging.Logger): logger.log(level, msg) elif (logger == 'silent'): pass elif isinstance(logger, str): _logger = get_logger(logger) _logger.log(level, msg) else: raise TypeError(f'logger should be either a logging.Logger object, str, "silent" or None, but got {type(logger)}')
def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse
def is_str(x): 'Whether the input is an string instance.\n\n Note: This method is deprecated since python 2 is no longer supported.\n ' return isinstance(x, str)
def import_modules_from_strings(imports, allow_failed_imports=False): "Import modules from the given list of strings.\n\n Args:\n imports (list | str | None): The given module names to be imported.\n allow_failed_imports (bool): If True, the failed imports will return\n None. Otherwise, an ImportError is raise. Default: False.\n\n Returns:\n list[module] | module | None: The imported modules.\n\n Examples:\n >>> osp, sys = import_modules_from_strings(\n ... ['os.path', 'sys'])\n >>> import os.path as osp_\n >>> import sys as sys_\n >>> assert osp == osp_\n >>> assert sys == sys_\n " if (not imports): return single_import = False if isinstance(imports, str): single_import = True imports = [imports] if (not isinstance(imports, list)): raise TypeError(f'custom_imports must be a list but got type {type(imports)}') imported = [] for imp in imports: if (not isinstance(imp, str)): raise TypeError(f'{imp} is of type {type(imp)} and cannot be imported.') try: imported_tmp = import_module(imp) except ImportError: if allow_failed_imports: warnings.warn(f'{imp} failed to import and is ignored.', UserWarning) imported_tmp = None else: raise ImportError imported.append(imported_tmp) if single_import: imported = imported[0] return imported
def iter_cast(inputs, dst_type, return_type=None): 'Cast elements of an iterable object into some type.\n\n Args:\n inputs (Iterable): The input object.\n dst_type (type): Destination type.\n return_type (type, optional): If specified, the output object will be\n converted to this type, otherwise an iterator.\n\n Returns:\n iterator or specified type: The converted object.\n ' if (not isinstance(inputs, abc.Iterable)): raise TypeError('inputs must be an iterable object') if (not isinstance(dst_type, type)): raise TypeError('"dst_type" must be a valid type') out_iterable = map(dst_type, inputs) if (return_type is None): return out_iterable else: return return_type(out_iterable)
def list_cast(inputs, dst_type): 'Cast elements of an iterable object into a list of some type.\n\n A partial method of :func:`iter_cast`.\n ' return iter_cast(inputs, dst_type, return_type=list)
def tuple_cast(inputs, dst_type): 'Cast elements of an iterable object into a tuple of some type.\n\n A partial method of :func:`iter_cast`.\n ' return iter_cast(inputs, dst_type, return_type=tuple)
def is_seq_of(seq, expected_type, seq_type=None): 'Check whether it is a sequence of some type.\n\n Args:\n seq (Sequence): The sequence to be checked.\n expected_type (type): Expected type of sequence items.\n seq_type (type, optional): Expected sequence type.\n\n Returns:\n bool: Whether the sequence is valid.\n ' if (seq_type is None): exp_seq_type = abc.Sequence else: assert isinstance(seq_type, type) exp_seq_type = seq_type if (not isinstance(seq, exp_seq_type)): return False for item in seq: if (not isinstance(item, expected_type)): return False return True
def is_list_of(seq, expected_type): 'Check whether it is a list of some type.\n\n A partial method of :func:`is_seq_of`.\n ' return is_seq_of(seq, expected_type, seq_type=list)
def is_tuple_of(seq, expected_type): 'Check whether it is a tuple of some type.\n\n A partial method of :func:`is_seq_of`.\n ' return is_seq_of(seq, expected_type, seq_type=tuple)
def slice_list(in_list, lens): 'Slice a list into several sub lists by a list of given length.\n\n Args:\n in_list (list): The list to be sliced.\n lens(int or list): The expected length of each out list.\n\n Returns:\n list: A list of sliced list.\n ' if isinstance(lens, int): assert ((len(in_list) % lens) == 0) lens = ([lens] * int((len(in_list) / lens))) if (not isinstance(lens, list)): raise TypeError('"indices" must be an integer or a list of integers') elif (sum(lens) != len(in_list)): raise ValueError(f'sum of lens and list length does not match: {sum(lens)} != {len(in_list)}') out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:(idx + lens[i])]) idx += lens[i] return out_list
def concat_list(in_list): 'Concatenate a list of list into a single list.\n\n Args:\n in_list (list): The list of list to be merged.\n\n Returns:\n list: The concatenated flat list.\n ' return list(itertools.chain(*in_list))
def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not found, please install them first.'): 'A decorator factory to check if prerequisites are satisfied.\n\n Args:\n prerequisites (str of list[str]): Prerequisites to be checked.\n checker (callable): The checker method that returns True if a\n prerequisite is meet, False otherwise.\n msg_tmpl (str): The message template with two variables.\n\n Returns:\n decorator: A specific decorator.\n ' def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = ([prerequisites] if isinstance(prerequisites, str) else prerequisites) missing = [] for item in requirements: if (not checker(item)): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not meet.') else: return func(*args, **kwargs) return wrapped_func return wrap
def _check_py_package(package): try: import_module(package) except ImportError: return False else: return True
def _check_executable(cmd): if (subprocess.call(f'which {cmd}', shell=True) != 0): return False else: return True
def requires_package(prerequisites): "A decorator to check if some python packages are installed.\n\n Example:\n >>> @requires_package('numpy')\n >>> func(arg1, args):\n >>> return numpy.zeros(1)\n array([0.])\n >>> @requires_package(['numpy', 'non_package'])\n >>> func(arg1, args):\n >>> return numpy.zeros(1)\n ImportError\n " return check_prerequisites(prerequisites, checker=_check_py_package)
def requires_executable(prerequisites): "A decorator to check if some executable files are installed.\n\n Example:\n >>> @requires_executable('ffmpeg')\n >>> func(arg1, args):\n >>> print(1)\n 1\n " return check_prerequisites(prerequisites, checker=_check_executable)
def deprecated_api_warning(name_dict, cls_name=None): 'A decorator to check if some arguments are deprecate and try to replace\n deprecate src_arg_name to dst_arg_name.\n\n Args:\n name_dict(dict):\n key (str): Deprecate argument names.\n val (str): Expected argument names.\n\n Returns:\n func: New function.\n ' def api_warning_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): args_info = getfullargspec(old_func) func_name = old_func.__name__ if (cls_name is not None): func_name = f'{cls_name}.{func_name}' if args: arg_names = args_info.args[:len(args)] for (src_arg_name, dst_arg_name) in name_dict.items(): if (src_arg_name in arg_names): warnings.warn(f'"{src_arg_name}" is deprecated in `{func_name}`, please use "{dst_arg_name}" instead', DeprecationWarning) arg_names[arg_names.index(src_arg_name)] = dst_arg_name if kwargs: for (src_arg_name, dst_arg_name) in name_dict.items(): if (src_arg_name in kwargs): assert (dst_arg_name not in kwargs), f'The expected behavior is to replace the deprecated key `{src_arg_name}` to new key `{dst_arg_name}`, but got them in the arguments at the same time, which is confusing. `{src_arg_name} will be deprecated in the future, please use `{dst_arg_name}` instead.' warnings.warn(f'"{src_arg_name}" is deprecated in `{func_name}`, please use "{dst_arg_name}" instead', DeprecationWarning) kwargs[dst_arg_name] = kwargs.pop(src_arg_name) output = old_func(*args, **kwargs) return output return new_func return api_warning_wrapper
def is_method_overridden(method, base_class, derived_class): 'Check if a method of base class is overridden in derived class.\n\n Args:\n method (str): the method name to check.\n base_class (type): the class of the base class.\n derived_class (type | Any): the class or instance of the derived class.\n ' assert isinstance(base_class, type), "base_class doesn't accept instance, Please pass class instead." if (not isinstance(derived_class, type)): derived_class = derived_class.__class__ base_method = getattr(base_class, method) derived_method = getattr(derived_class, method) return (derived_method != base_method)
def has_method(obj: object, method: str) -> bool: 'Check whether the object has a method.\n\n Args:\n method (str): The method name to check.\n obj (object): The object to check.\n\n Returns:\n bool: True if the object has the method else False.\n ' return (hasattr(obj, method) and callable(getattr(obj, method)))
def is_rocm_pytorch() -> bool: is_rocm = False if (TORCH_VERSION != 'parrots'): try: from torch.utils.cpp_extension import ROCM_HOME is_rocm = (True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False) except ImportError: pass return is_rocm
def _get_cuda_home(): if (TORCH_VERSION == 'parrots'): from parrots.utils.build_extension import CUDA_HOME elif is_rocm_pytorch(): from torch.utils.cpp_extension import ROCM_HOME CUDA_HOME = ROCM_HOME else: from torch.utils.cpp_extension import CUDA_HOME return CUDA_HOME
def get_build_config(): if (TORCH_VERSION == 'parrots'): from parrots.config import get_build_info return get_build_info() else: return torch.__config__.show()
def _get_conv(): if (TORCH_VERSION == 'parrots'): from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin else: from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin return (_ConvNd, _ConvTransposeMixin)
def _get_dataloader(): if (TORCH_VERSION == 'parrots'): from torch.utils.data import DataLoader, PoolDataLoader else: from torch.utils.data import DataLoader PoolDataLoader = DataLoader return (DataLoader, PoolDataLoader)
def _get_extension(): if (TORCH_VERSION == 'parrots'): from parrots.utils.build_extension import BuildExtension, Extension CppExtension = partial(Extension, cuda=False) CUDAExtension = partial(Extension, cuda=True) else: from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension return (BuildExtension, CppExtension, CUDAExtension)
def _get_pool(): if (TORCH_VERSION == 'parrots'): from parrots.nn.modules.pool import _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd else: from torch.nn.modules.pooling import _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd return (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd)
def _get_norm(): if (TORCH_VERSION == 'parrots'): from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm SyncBatchNorm_ = torch.nn.SyncBatchNorm2d else: from torch.nn.modules.batchnorm import _BatchNorm from torch.nn.modules.instancenorm import _InstanceNorm SyncBatchNorm_ = torch.nn.SyncBatchNorm return (_BatchNorm, _InstanceNorm, SyncBatchNorm_)
class SyncBatchNorm(SyncBatchNorm_): def _check_input_dim(self, input): if (TORCH_VERSION == 'parrots'): if (input.dim() < 2): raise ValueError(f'expected at least 2D input (got {input.dim()}D input)') else: super()._check_input_dim(input)
def is_filepath(x): return (is_str(x) or isinstance(x, Path))
def fopen(filepath, *args, **kwargs): if is_str(filepath): return open(filepath, *args, **kwargs) elif isinstance(filepath, Path): return filepath.open(*args, **kwargs) raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): if (not osp.isfile(filename)): raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=511): if (dir_name == ''): return dir_name = osp.expanduser(dir_name) os.makedirs(dir_name, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs): if (os.path.lexists(dst) and overwrite): os.remove(dst) os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True): 'Scan a directory to find the interested files.\n\n Args:\n dir_path (str | :obj:`Path`): Path of the directory.\n suffix (str | tuple(str), optional): File suffix that we are\n interested in. Default: None.\n recursive (bool, optional): If set to True, recursively scan the\n directory. Default: False.\n case_sensitive (bool, optional) : If set to False, ignore the case of\n suffix. Default: True.\n\n Returns:\n A generator for all the interested files with relative paths.\n ' if isinstance(dir_path, (str, Path)): dir_path = str(dir_path) else: raise TypeError('"dir_path" must be a string or Path object') if ((suffix is not None) and (not isinstance(suffix, (str, tuple)))): raise TypeError('"suffix" must be a string or tuple of strings') if ((suffix is not None) and (not case_sensitive)): suffix = (suffix.lower() if isinstance(suffix, str) else tuple((item.lower() for item in suffix))) root = dir_path def _scandir(dir_path, suffix, recursive, case_sensitive): for entry in os.scandir(dir_path): if ((not entry.name.startswith('.')) and entry.is_file()): rel_path = osp.relpath(entry.path, root) _rel_path = (rel_path if case_sensitive else rel_path.lower()) if ((suffix is None) or _rel_path.endswith(suffix)): (yield rel_path) elif (recursive and os.path.isdir(entry.path)): (yield from _scandir(entry.path, suffix, recursive, case_sensitive)) return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git',)): 'Finds the root directory (including itself) of specified markers.\n\n Args:\n path (str): Path of directory or file.\n markers (list[str], optional): List of file or directory names.\n\n Returns:\n The directory contained one of the markers or None if not found.\n ' if osp.isfile(path): path = osp.dirname(path) (prev, cur) = (None, osp.abspath(osp.expanduser(path))) while (cur != prev): if any((osp.exists(osp.join(cur, marker)) for marker in markers)): return cur (prev, cur) = (cur, osp.split(cur)[0]) return None
class ProgressBar(): 'A progress bar which can print the progress.' def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout): self.task_num = task_num self.bar_width = bar_width self.completed = 0 self.file = file if start: self.start() @property def terminal_width(self): (width, _) = get_terminal_size() return width def start(self): if (self.task_num > 0): self.file.write(f"[{(' ' * self.bar_width)}] 0/{self.task_num}, elapsed: 0s, ETA:") else: self.file.write('completed: 0, elapsed: 0s') self.file.flush() self.timer = Timer() def update(self, num_tasks=1): assert (num_tasks > 0) self.completed += num_tasks elapsed = self.timer.since_start() if (elapsed > 0): fps = (self.completed / elapsed) else: fps = float('inf') if (self.task_num > 0): percentage = (self.completed / float(self.task_num)) eta = int((((elapsed * (1 - percentage)) / percentage) + 0.5)) msg = f''' [{{}}] {self.completed}/{self.task_num}, {fps:.1f} task/s, elapsed: {int((elapsed + 0.5))}s, ETA: {eta:5}s''' bar_width = min(self.bar_width, (int((self.terminal_width - len(msg))) + 2), int((self.terminal_width * 0.6))) bar_width = max(2, bar_width) mark_width = int((bar_width * percentage)) bar_chars = (('>' * mark_width) + (' ' * (bar_width - mark_width))) self.file.write(msg.format(bar_chars)) else: self.file.write(f'completed: {self.completed}, elapsed: {int((elapsed + 0.5))}s, {fps:.1f} tasks/s') self.file.flush()
def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs): 'Track the progress of tasks execution with a progress bar.\n\n Tasks are done with a simple for-loop.\n\n Args:\n func (callable): The function to be applied to each task.\n tasks (list or tuple[Iterable, int]): A list of tasks or\n (tasks, total num).\n bar_width (int): Width of progress bar.\n\n Returns:\n list: The task results.\n ' if isinstance(tasks, tuple): assert (len(tasks) == 2) assert isinstance(tasks[0], Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, Iterable): task_num = len(tasks) else: raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple') prog_bar = ProgressBar(task_num, bar_width, file=file) results = [] for task in tasks: results.append(func(task, **kwargs)) prog_bar.update() prog_bar.file.write('\n') return results
def init_pool(process_num, initializer=None, initargs=None): if (initializer is None): return Pool(process_num) elif (initargs is None): return Pool(process_num, initializer) else: if (not isinstance(initargs, tuple)): raise TypeError('"initargs" must be a tuple') return Pool(process_num, initializer, initargs)
def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True, file=sys.stdout):
    """Track the progress of parallel task execution with a progress bar.

    The built-in :mod:`multiprocessing` module is used for process pools and
    tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        nproc (int): Process (worker) number.
        initializer (None or callable): Refer to
            :class:`multiprocessing.Pool` for details.
        initargs (None or tuple): Refer to :class:`multiprocessing.Pool`
            for details.
        chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
        bar_width (int): Width of progress bar.
        skip_first (bool): Whether to skip the first sample for each worker
            when estimating fps, since the initialization step may takes
            longer.
        keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
            :func:`Pool.imap_unordered` is used.

    Returns:
        list: The task results.
    """
    # A (iterable, total) tuple is accepted for iterators without len().
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError(
            '"tasks" must be an iterable object or a (iterator, int) tuple')

    pool = init_pool(nproc, initializer, initargs)
    # When skipping the warm-up batch, delay starting the bar and exclude
    # those tasks from the total so the fps estimate is not skewed.
    start = not skip_first
    task_num -= nproc * chunksize * int(skip_first)
    bar = ProgressBar(task_num, bar_width, start, file=file)

    if keep_order:
        gen = pool.imap(func, tasks, chunksize)
    else:
        gen = pool.imap_unordered(func, tasks, chunksize)

    results = []
    for result in gen:
        results.append(result)
        if skip_first:
            if len(results) < nproc * chunksize:
                continue
            elif len(results) == nproc * chunksize:
                # Warm-up batch finished: start timing from here.
                bar.start()
                continue
        bar.update()
    bar.file.write('\n')
    pool.close()
    pool.join()
    return results
def track_iter_progress(tasks, bar_width=50, file=sys.stdout): 'Track the progress of tasks iteration or enumeration with a progress\n bar.\n\n Tasks are yielded with a simple for-loop.\n\n Args:\n tasks (list or tuple[Iterable, int]): A list of tasks or\n (tasks, total num).\n bar_width (int): Width of progress bar.\n\n Yields:\n list: The task results.\n ' if isinstance(tasks, tuple): assert (len(tasks) == 2) assert isinstance(tasks[0], Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, Iterable): task_num = len(tasks) else: raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple') prog_bar = ProgressBar(task_num, bar_width, file=file) for task in tasks: (yield task) prog_bar.update() prog_bar.file.write('\n')
def build_from_cfg(cfg, registry, default_args=None): 'Build a module from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key "type".\n registry (:obj:`Registry`): The registry to search the type from.\n default_args (dict, optional): Default initialization arguments.\n\n Returns:\n object: The constructed object.\n ' if (not isinstance(cfg, dict)): raise TypeError(f'cfg must be a dict, but got {type(cfg)}') if ('type' not in cfg): if ((default_args is None) or ('type' not in default_args)): raise KeyError(f'''`cfg` or `default_args` must contain the key "type", but got {cfg} {default_args}''') if (not isinstance(registry, Registry)): raise TypeError(f'registry must be an mmcv.Registry object, but got {type(registry)}') if (not (isinstance(default_args, dict) or (default_args is None))): raise TypeError(f'default_args must be a dict or None, but got {type(default_args)}') args = cfg.copy() if (default_args is not None): for (name, value) in default_args.items(): args.setdefault(name, value) obj_type = args.pop('type') if isinstance(obj_type, str): obj_cls = registry.get(obj_type) if (obj_cls is None): raise KeyError(f'{obj_type} is not in the {registry.name} registry') elif inspect.isclass(obj_type): obj_cls = obj_type else: raise TypeError(f'type must be a str or valid type, but got {type(obj_type)}') try: return obj_cls(**args) except Exception as e: raise type(e)(f'{obj_cls.__name__}: {e}')
class Registry():
    """A registry to map strings to classes.

    Registered object could be built from registry.

    Example:
        >>> MODELS = Registry('models')
        >>> @MODELS.register_module()
        >>> class ResNet:
        >>>     pass
        >>> resnet = MODELS.build(dict(type='ResNet'))

    Please refer to
    https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
    advanced usage.

    Args:
        name (str): Registry name.
        build_func(func, optional): Build function to construct instance from
            Registry, func:`build_from_cfg` is used if neither ``parent`` or
            ``build_func`` is specified. If ``parent`` is specified and
            ``build_func`` is not given, ``build_func`` will be inherited
            from ``parent``. Default: None.
        parent (Registry, optional): Parent registry. The class registered in
            children registry could be built from parent. Default: None.
        scope (str, optional): The scope of registry. It is the key to search
            for children registry. If not specified, scope will be the name of
            the package where class is defined, e.g. mmdet, mmcls, mmseg.
            Default: None.
    """

    def __init__(self, name, build_func=None, parent=None, scope=None):
        self._name = name
        self._module_dict = dict()
        self._children = dict()
        # Default scope is the top-level package of the module that created
        # this registry (see infer_scope).
        self._scope = (self.infer_scope() if (scope is None) else scope)
        # build_func resolution priority: explicit argument > parent's
        # build_func > build_from_cfg.
        if (build_func is None):
            if (parent is not None):
                self.build_func = parent.build_func
            else:
                self.build_func = build_from_cfg
        else:
            self.build_func = build_func
        if (parent is not None):
            assert isinstance(parent, Registry)
            # Register this registry under its scope in the parent.
            parent._add_children(self)
            self.parent = parent
        else:
            self.parent = None

    def __len__(self):
        return len(self._module_dict)

    def __contains__(self, key):
        return (self.get(key) is not None)

    def __repr__(self):
        format_str = (self.__class__.__name__ +
                      f'(name={self._name}, items={self._module_dict})')
        return format_str

    @staticmethod
    def infer_scope():
        """Infer the scope of registry.

        The name of the package where registry is defined will be returned.

        Example:
            >>> # in mmdet/models/backbone/resnet.py
            >>> MODELS = Registry('models')
            >>> @MODELS.register_module()
            >>> class ResNet:
            >>>     pass
            The scope of ``ResNet`` will be ``mmdet``.

        Returns:
            str: The inferred scope name.
        """
        # inspect.stack()[2] is the frame that called Registry.__init__:
        # [0] is infer_scope itself, [1] is __init__, [2] is the module
        # defining the registry. This depth is load-bearing -- do not add
        # intermediate calls.
        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
        split_filename = filename.split('.')
        return split_filename[0]

    @staticmethod
    def split_scope_key(key):
        """Split scope and key.

        The first scope will be split from key.

        Examples:
            >>> Registry.split_scope_key('mmdet.ResNet')
            'mmdet', 'ResNet'
            >>> Registry.split_scope_key('ResNet')
            None, 'ResNet'

        Return:
            tuple[str | None, str]: The former element is the first scope of
            the key, which can be ``None``. The latter is the remaining key.
        """
        split_index = key.find('.')
        if (split_index != (- 1)):
            return (key[:split_index], key[(split_index + 1):])
        else:
            return (None, key)

    @property
    def name(self):
        return self._name

    @property
    def scope(self):
        return self._scope

    @property
    def module_dict(self):
        return self._module_dict

    @property
    def children(self):
        return self._children

    def get(self, key):
        """Get the registry record.

        Args:
            key (str): The class name in string format.

        Returns:
            class: The corresponding class.
        """
        (scope, real_key) = self.split_scope_key(key)
        if ((scope is None) or (scope == self._scope)):
            # The key belongs to this registry's own scope.
            # Returns None implicitly when the key is absent.
            if (real_key in self._module_dict):
                return self._module_dict[real_key]
        elif (scope in self._children):
            # Delegate to the child registry owning that scope.
            return self._children[scope].get(real_key)
        else:
            # Unknown scope: climb to the root registry and retry there,
            # since the scope may be registered under a different branch.
            parent = self.parent
            while (parent.parent is not None):
                parent = parent.parent
            return parent.get(key)

    def build(self, *args, **kwargs):
        # Delegate construction to the configured build function (by
        # default build_from_cfg), passing this registry along.
        return self.build_func(*args, **kwargs, registry=self)

    def _add_children(self, registry):
        """Add children for a registry.

        The ``registry`` will be added as children based on its scope.
        The parent registry could build objects from children registry.

        Example:
            >>> models = Registry('models')
            >>> mmdet_models = Registry('models', parent=models)
            >>> @mmdet_models.register_module()
            >>> class ResNet:
            >>>     pass
            >>> resnet = models.build(dict(type='mmdet.ResNet'))
        """
        assert isinstance(registry, Registry)
        assert (registry.scope is not None)
        assert (registry.scope not in self.children), \
            f'scope {registry.scope} exists in {self.name} registry'
        self.children[registry.scope] = registry

    def _register_module(self, module_class, module_name=None, force=False):
        # Core registration: maps one or several names to a class.
        if (not inspect.isclass(module_class)):
            raise TypeError(f'module must be a class, but got {type(module_class)}')
        if (module_name is None):
            module_name = module_class.__name__
        if isinstance(module_name, str):
            module_name = [module_name]
        for name in module_name:
            if ((not force) and (name in self._module_dict)):
                raise KeyError(f'{name} is already registered in {self.name}')
            self._module_dict[name] = module_class

    def deprecated_register_module(self, cls=None, force=False):
        # Backward-compat shim for the old register_module(module) API.
        warnings.warn('The old API of register_module(module, force=False) is deprecated and will be removed, please use the new API register_module(name=None, force=False, module=None) instead.', DeprecationWarning)
        if (cls is None):
            return partial(self.deprecated_register_module, force=force)
        self._register_module(cls, force=force)
        return cls

    def register_module(self, name=None, force=False, module=None):
        """Register a module.

        A record will be added to `self._module_dict`, whose key is the class
        name or the specified name, and value is the class itself.
        It can be used as a decorator or a normal function.

        Example:
            >>> backbones = Registry('backbone')
            >>> @backbones.register_module()
            >>> class ResNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> @backbones.register_module(name='mnet')
            >>> class MobileNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> class ResNet:
            >>>     pass
            >>> backbones.register_module(ResNet)

        Args:
            name (str | None): The module name to be registered. If not
                specified, the class name will be used.
            force (bool, optional): Whether to override an existing class with
                the same name. Default: False.
            module (type): Module class to be registered.
        """
        if (not isinstance(force, bool)):
            raise TypeError(f'force must be a boolean, but got {type(force)}')
        # A class passed as ``name`` means the old positional API was used.
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)
        if (not ((name is None) or isinstance(name, str) or is_seq_of(name, str))):
            raise TypeError(f'name must be either of None, an instance of str or a sequence of str, but got {type(name)}')
        # Used as a normal function: register_module(module=SomeClass).
        if (module is not None):
            self._register_module(module_class=module, module_name=name, force=force)
            return module

        # Used as a decorator: @x.register_module().
        def _register(cls):
            self._register_module(module_class=cls, module_name=name, force=force)
            return cls
        return _register
def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
    """Initialize the random state of a dataloader worker.

    The per-worker seed is ``num_workers * rank + worker_id + seed`` so that
    every worker of every distributed process draws a distinct, reproducible
    random stream.

    Args:
        worker_id (int): Id for each worker.
        num_workers (int): Number of workers.
        rank (int): Rank in distributed training.
        seed (int): Random seed.
    """
    per_worker_seed = num_workers * rank + worker_id + seed
    # Seed every RNG the worker may rely on.
    random.seed(per_worker_seed)
    np.random.seed(per_worker_seed)
    torch.manual_seed(per_worker_seed)
def check_python_script(cmd):
    """Run a python command line as ``__main__`` inside the current process.

    Unlike ``os.system``, the script executes in this interpreter, so coverage
    tools can track it. Two command forms are supported:

    - ./tests/data/scripts/hello.py zz
    - python tests/data/scripts/hello.py zz
    """
    argv = split(cmd)
    # Drop a leading "python" token so argv[0] is always the script path.
    if argv[0] == 'python':
        argv = argv[1:]
    with patch.object(sys, 'argv', argv):
        run_path(argv[0], run_name='__main__')
def _any(judge_result): 'Since built-in ``any`` works only when the element of iterable is not\n iterable, implement the function.' if (not isinstance(judge_result, Iterable)): return judge_result try: for element in judge_result: if _any(element): return True except TypeError: if judge_result: return True return False
def assert_dict_contains_subset(dict_obj: Dict[Any, Any],
                                expected_subset: Dict[Any, Any]) -> bool:
    """Check whether ``dict_obj`` contains every entry of ``expected_subset``.

    Args:
        dict_obj (Dict[Any, Any]): Dict object to be checked.
        expected_subset (Dict[Any, Any]): Subset expected to be contained in
            dict_obj.

    Returns:
        bool: Whether the dict_obj contains the expected_subset.
    """
    # ``_any`` handles array-valued comparisons that yield element-wise
    # results instead of a single bool.
    return all(
        key in dict_obj and not _any(dict_obj[key] != value)
        for key, value in expected_subset.items())
def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool:
    """Check whether ``obj`` carries every attribute in ``expected_attrs``
    with the expected value.

    Args:
        obj (object): Class object to be checked.
        expected_attrs (Dict[str, Any]): Dict of the expected attrs.

    Returns:
        bool: Whether the attribute of class object is correct.
    """
    # ``_any`` handles array-valued comparisons that yield element-wise
    # results instead of a single bool.
    return all(
        hasattr(obj, attr) and not _any(getattr(obj, attr) != value)
        for attr, value in expected_attrs.items())
def assert_dict_has_keys(obj: Dict[str, Any],
                         expected_keys: List[str]) -> bool:
    """Check whether ``obj`` contains all of ``expected_keys``.

    Args:
        obj (Dict[str, Any]): Object to be checked.
        expected_keys (List[str]): Keys expected to contained in the keys of
            the obj.

    Returns:
        bool: Whether the obj has the expected keys.
    """
    return all(key in obj for key in expected_keys)
def assert_keys_equal(result_keys: List[str],
                      target_keys: List[str]) -> bool:
    """Check whether ``result_keys`` and ``target_keys`` hold the same set of
    keys (order and duplicates are ignored).

    Args:
        result_keys (List[str]): Result keys to be checked.
        target_keys (List[str]): Target keys to be checked.

    Returns:
        bool: Whether target_keys is equal to result_keys.
    """
    # An empty symmetric difference means the two key sets are identical.
    return not set(result_keys).symmetric_difference(target_keys)
def assert_is_norm_layer(module) -> bool:
    """Check whether ``module`` is one of the recognized normalization layers.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the module is a norm layer.
    """
    from torch.nn import GroupNorm, LayerNorm
    from .parrots_wrapper import _BatchNorm, _InstanceNorm
    # Batch/instance norm come through the parrots wrapper so both backends
    # are covered.
    return isinstance(module,
                      (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
def assert_params_all_zeros(module) -> bool:
    """Check whether all parameters of ``module`` are zeros.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the parameters of the module is all zeros.
    """
    weight = module.weight.data
    if not weight.allclose(weight.new_zeros(weight.size())):
        return False
    # A module without a bias (or with bias=None) passes the bias check
    # vacuously.
    bias = getattr(module, 'bias', None)
    if bias is None:
        return True
    bias_data = bias.data
    return bias_data.allclose(bias_data.new_zeros(bias_data.size()))
class TimerError(Exception):
    """Raised when a :class:`Timer` is queried while it is not running."""

    def __init__(self, message):
        # Keep the message available as an attribute as well as via str().
        self.message = message
        super().__init__(message)
class Timer:
    """A flexible timer usable as a context manager or manually.

    The timer starts on construction by default. ``since_start`` returns the
    seconds elapsed since :meth:`start`; ``since_last_check`` returns the
    seconds since the previous checking operation (either method counts as a
    check). When used in a ``with`` block the elapsed time is printed on exit
    using ``print_tmpl``.
    """

    def __init__(self, start=True, print_tmpl=None):
        self._is_running = False
        # Template used to print elapsed seconds on context-manager exit.
        self.print_tmpl = print_tmpl or '{:.3f}'
        if start:
            self.start()

    @property
    def is_running(self):
        """bool: indicate whether the timer is running"""
        return self._is_running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        print(self.print_tmpl.format(self.since_last_check()))
        self._is_running = False

    def start(self):
        """Start the timer (a restart only refreshes the check point)."""
        if not self._is_running:
            self._t_start = time()
            self._is_running = True
        # Every call refreshes the last-check timestamp.
        self._t_last = time()

    def since_start(self):
        """Total time since the timer is started.

        Returns:
            float: Time in seconds.

        Raises:
            TimerError: If the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        self._t_last = time()
        return self._t_last - self._t_start

    def since_last_check(self):
        """Time since the last checking operation.

        Returns:
            float: Time in seconds.

        Raises:
            TimerError: If the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        elapsed = time() - self._t_last
        self._t_last = time()
        return elapsed
def check_time(timer_id):
    """Add check points in a single line.

    A timer is registered under ``timer_id`` the first time this is called
    (returning 0); subsequent calls return the seconds since the previous
    check for that identifier. Suitable for timing iterations of a loop.

    Args:
        timer_id (str): Timer identifier.

    Returns:
        float: 0 on first call, otherwise seconds since the last check.
    """
    if timer_id in _g_timers:
        return _g_timers[timer_id].since_last_check()
    # First sighting of this id: register a fresh timer.
    _g_timers[timer_id] = Timer()
    return 0
def is_jit_tracing() -> bool:
    """Return whether the current code is executing under torch.jit tracing.

    Falls back to ``False`` (with a warning) for parrots builds and torch
    versions older than 1.6.0, where ``torch.jit.is_tracing`` is unavailable.
    """
    # Short-circuit keeps digit_version() from being called on 'parrots'.
    if (torch.__version__ == 'parrots'
            or digit_version(torch.__version__) < digit_version('1.6.0')):
        warnings.warn(
            'torch.jit.is_tracing is only supported after v1.6.0. '
            'Therefore is_tracing returns False automatically. Please '
            'set on_trace manually if you are using trace.', UserWarning)
        return False
    on_trace = torch.jit.is_tracing()
    # Some torch builds return a non-bool flag; fall back to the private API.
    if isinstance(on_trace, bool):
        return on_trace
    return torch._C._is_tracing()
def digit_version(version_str: str, length: int = 4):
    """Convert a version string into a tuple of integers.

    Useful for comparing two versions. Pre-release ordering is encoded as
    alpha < beta < rc < final via negative sentinel values.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int]: The version info in digits (integers).
    """
    from packaging.version import parse
    assert 'parrots' not in version_str
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)[:length]
    # Right-pad with zeros so every result has ``length`` numeric entries.
    release += [0] * (length - len(release))
    if version.is_prerelease:
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        if version.pre:
            if version.pre[0] in mapping:
                val = mapping[version.pre[0]]
            else:
                warnings.warn(f'unknown prerelease version {version.pre[0]}, '
                              f'version checking may go wrong')
            release.extend([val, version.pre[-1]])
        else:
            release.extend([val, 0])
    elif version.is_postrelease:
        release.extend([1, version.post])
    else:
        release.extend([0, 0])
    return tuple(release)
def _minimal_ext_cmd(cmd): env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if (v is not None): env[k] = v env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out
def get_git_hash(fallback='unknown', digits=None):
    """Get the git hash of the current repo.

    Args:
        fallback (str, optional): The fallback string when git hash is
            unavailable. Defaults to 'unknown'.
        digits (int, optional): kept digits of the hash. Defaults to None,
            meaning all digits are kept.

    Returns:
        str: Git commit hash.
    """
    if digits is not None and not isinstance(digits, int):
        raise TypeError('digits must be None or an integer')
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
        if digits is not None:
            sha = sha[:digits]
    except OSError:
        # git is missing or not executable; report the fallback instead.
        sha = fallback
    return sha
def parse_version_info(version_str: str, length: int = 4) -> tuple:
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
        (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
        (2, 0, 0, 0, 'rc', 1) (when length is set to 4).
    """
    from packaging.version import parse
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)
    release = release[:length]
    if len(release) < length:
        release = release + [0] * (length - len(release))
    if version.is_prerelease:
        release.extend(list(version.pre))
    elif version.is_postrelease:
        # BUGFIX: ``version.post`` is an int, so ``list(version.post)``
        # raised TypeError for any post-release (e.g. "1.0.post2"). Wrap it
        # in a list and pad with a leading 0 so the tuple length matches the
        # other branches.
        release.extend([0, version.post])
    else:
        release.extend([0, 0])
    return tuple(release)
class Cache:
    """A fixed-capacity FIFO cache.

    When the cache is full, the oldest inserted entry is evicted. ``put``
    ignores keys that are already present (it does not overwrite them).
    """

    def __init__(self, capacity):
        """
        Args:
            capacity (int): Maximum number of items the cache can hold;
                must be at least 1 after conversion to int.

        Raises:
            ValueError: If the converted capacity is not positive.
        """
        # BUGFIX: validate the *converted* value. Previously the original
        # argument was checked, so e.g. Cache(0.5) passed validation but
        # produced a zero-capacity cache whose put() crashed on popitem().
        capacity = int(capacity)
        if capacity <= 0:
            raise ValueError('capacity must be a positive integer')
        self._cache = OrderedDict()
        self._capacity = capacity

    @property
    def capacity(self):
        """int: Maximum number of items the cache can hold."""
        return self._capacity

    @property
    def size(self):
        """int: Current number of cached items."""
        return len(self._cache)

    def put(self, key, val):
        """Insert ``val`` under ``key``; no-op if ``key`` already exists."""
        if key in self._cache:
            return
        if len(self._cache) >= self._capacity:
            # Evict the oldest entry (FIFO order).
            self._cache.popitem(last=False)
        self._cache[key] = val

    def get(self, key, default=None):
        """Return the cached value for ``key``, or ``default`` if absent."""
        return self._cache[key] if key in self._cache else default
class VideoReader():
    """Video class with similar usage to a list object.

    This video wrapper class provides convenient apis to access frames.
    There exists an issue of OpenCV's VideoCapture class that jumping to a
    certain frame may be inaccurate. It is fixed in this class by checking
    the position after jumping each time.
    Cache is used when decoding videos. So if the same frame is visited for
    the second time, there is no need to decode again if it is stored in the
    cache.

    Examples:
        >>> import mmcv
        >>> v = mmcv.VideoReader('sample.mp4')
        >>> len(v)  # get the total frame number with `len()`
        120
        >>> for img in v:  # v is iterable
        >>>     mmcv.imshow(img)
        >>> v[5]  # get the 6th frame
    """

    def __init__(self, filename, cache_capacity=10):
        # URLs are handed straight to OpenCV; only local paths are checked.
        if not filename.startswith(('https://', 'http://')):
            check_file_exist(filename, 'Video file not found: ' + filename)
        self._vcap = cv2.VideoCapture(filename)
        assert cache_capacity > 0
        self._cache = Cache(cache_capacity)
        # Index of the next frame to be decoded (0-based).
        self._position = 0
        # Static stream properties, read once up front.
        self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))
        self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))
        self._fps = self._vcap.get(CAP_PROP_FPS)
        self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))
        self._fourcc = self._vcap.get(CAP_PROP_FOURCC)

    @property
    def vcap(self):
        """:obj:`cv2.VideoCapture`: The raw VideoCapture object."""
        return self._vcap

    @property
    def opened(self):
        """bool: Indicate whether the video is opened."""
        return self._vcap.isOpened()

    @property
    def width(self):
        """int: Width of video frames."""
        return self._width

    @property
    def height(self):
        """int: Height of video frames."""
        return self._height

    @property
    def resolution(self):
        """tuple: Video resolution (width, height)."""
        return (self._width, self._height)

    @property
    def fps(self):
        """float: FPS of the video."""
        return self._fps

    @property
    def frame_cnt(self):
        """int: Total frames of the video."""
        return self._frame_cnt

    @property
    def fourcc(self):
        """str: "Four character code" of the video."""
        return self._fourcc

    @property
    def position(self):
        """int: Current cursor position, indicating frame decoded."""
        return self._position

    def _get_real_position(self):
        # Ask OpenCV where its decode cursor actually is.
        return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))

    def _set_real_position(self, frame_id):
        self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)
        pos = self._get_real_position()
        # OpenCV seeking can land short of the target; step forward by
        # decoding frames until the cursor really is at ``frame_id``.
        for _ in range(frame_id - pos):
            self._vcap.read()
        self._position = frame_id

    def read(self):
        """Read the next frame.

        If the next frame have been decoded before and in the cache, then
        return it directly, otherwise decode, cache and return it.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        # pos = self._position
        if self._cache:
            img = self._cache.get(self._position)
            if img is not None:
                ret = True
            else:
                # The real cursor may have drifted (e.g. after get_frame);
                # realign it before decoding.
                if self._position != self._get_real_position():
                    self._set_real_position(self._position)
                ret, img = self._vcap.read()
                if ret:
                    self._cache.put(self._position, img)
        else:
            ret, img = self._vcap.read()
        if ret:
            self._position += 1
        return img

    def get_frame(self, frame_id):
        """Get frame by index.

        Args:
            frame_id (int): Index of the expected frame, 0-based.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        if frame_id < 0 or frame_id >= self._frame_cnt:
            raise IndexError(
                f'"frame_id" must be between 0 and {self._frame_cnt - 1}')
        # Sequential access: fall back to read(), which handles the cache.
        if frame_id == self._position:
            return self.read()
        if self._cache:
            img = self._cache.get(frame_id)
            if img is not None:
                self._position = frame_id + 1
                return img
        self._set_real_position(frame_id)
        ret, img = self._vcap.read()
        if ret:
            if self._cache:
                self._cache.put(self._position, img)
            self._position += 1
        return img

    def current_frame(self):
        """Get the current frame (frame that is just visited).

        Returns:
            ndarray or None: If the video is fresh, return None, otherwise
            return the frame.
        """
        if self._position == 0:
            return None
        return self._cache.get(self._position - 1)

    def cvt2frames(self,
                   frame_dir,
                   file_start=0,
                   filename_tmpl='{:06d}.jpg',
                   start=0,
                   max_num=0,
                   show_progress=True):
        """Convert a video to frame images.

        Args:
            frame_dir (str): Output directory to store all the frame images.
            file_start (int): Filenames will start from the specified number.
            filename_tmpl (str): Filename template with the index as the
                placeholder.
            start (int): The starting frame index.
            max_num (int): Maximum number of frames to be written.
            show_progress (bool): Whether to show a progress bar.
        """
        mkdir_or_exist(frame_dir)
        if max_num == 0:
            task_num = self.frame_cnt - start
        else:
            task_num = min(self.frame_cnt - start, max_num)
        if task_num <= 0:
            raise ValueError('start must be less than total frame number')
        if start > 0:
            self._set_real_position(start)

        def write_frame(file_idx):
            img = self.read()
            if img is None:
                # Decoding failed; stop writing this frame silently.
                return
            filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
            cv2.imwrite(filename, img)

        if show_progress:
            track_progress(write_frame,
                           range(file_start, file_start + task_num))
        else:
            for i in range(task_num):
                write_frame(file_start + i)

    def __len__(self):
        return self.frame_cnt

    def __getitem__(self, index):
        if isinstance(index, slice):
            return [
                self.get_frame(i)
                for i in range(*index.indices(self.frame_cnt))
            ]
        # Support negative indices, list-style.
        if index < 0:
            index += self.frame_cnt
            if index < 0:
                raise IndexError('index out of range')
        return self.get_frame(index)

    def __iter__(self):
        # Iteration always starts from the first frame.
        self._set_real_position(0)
        return self

    def __next__(self):
        img = self.read()
        if img is not None:
            return img
        else:
            raise StopIteration

    # Python 2 style alias kept for backward compatibility.
    next = __next__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._vcap.release()
def frames2video(frame_dir,
                 video_file,
                 fps=30,
                 fourcc='XVID',
                 filename_tmpl='{:06d}.jpg',
                 start=0,
                 end=0,
                 show_progress=True):
    """Join the frame images from a directory into a single video file.

    Args:
        frame_dir (str): The directory containing video frames.
        video_file (str): Output filename.
        fps (float): FPS of the output video.
        fourcc (str): Fourcc of the output video, this should be compatible
            with the output file type.
        filename_tmpl (str): Filename template with the index as the variable.
        start (int): Starting frame index.
        end (int): Ending frame index.
        show_progress (bool): Whether to show a progress bar.
    """
    if end == 0:
        # Infer the ending index from the number of frame files on disk.
        ext = filename_tmpl.split('.')[-1]
        end = len([name for name in scandir(frame_dir, ext)])
    first_file = osp.join(frame_dir, filename_tmpl.format(start))
    check_file_exist(first_file, 'The start frame not found: ' + first_file)
    # The first frame determines the output resolution.
    height, width = cv2.imread(first_file).shape[:2]
    vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps,
                              (width, height))

    def write_frame(file_idx):
        frame_path = osp.join(frame_dir, filename_tmpl.format(file_idx))
        vwriter.write(cv2.imread(frame_path))

    if show_progress:
        track_progress(write_frame, range(start, end))
    else:
        for idx in range(start, end):
            write_frame(idx)
    vwriter.release()
@requires_executable('ffmpeg')
def convert_video(in_file, out_file, print_cmd=False, pre_options='',
                  **kwargs):
    """Convert a video with ffmpeg.

    This provides a general api to ffmpeg, the executed command is::

        `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`

    Options(kwargs) are mapped to ffmpeg commands with the following rules:

    - key=val: "-key val"
    - key=True: "-key"
    - key=False: ""

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        pre_options (str): Options appears before "-i <in_file>".
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    options = []
    for key, value in kwargs.items():
        if isinstance(value, bool):
            # Boolean True becomes a bare flag; False is dropped entirely.
            if value:
                options.append(f'-{key}')
        elif key == 'log_level':
            assert value in [
                'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
                'verbose', 'debug', 'trace'
            ]
            options.append(f'-loglevel {value}')
        else:
            options.append(f'-{key} {value}')
    cmd = (f"ffmpeg -y {pre_options} -i {in_file} "
           f"{' '.join(options)} {out_file}")
    if print_cmd:
        print(cmd)
    subprocess.call(cmd, shell=True)
@requires_executable('ffmpeg')
def resize_video(in_file,
                 out_file,
                 size=None,
                 ratio=None,
                 keep_ar=False,
                 log_level='info',
                 print_cmd=False):
    """Resize a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).
        ratio (tuple or float): Expected resize ratio, (2, 0.5) means
            (w*2, h*0.5).
        keep_ar (bool): Whether to keep original aspect ratio.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    # Exactly one of ``size`` and ``ratio`` must be given.
    if size is None and ratio is None:
        raise ValueError('expected size or ratio must be specified')
    if size is not None and ratio is not None:
        raise ValueError(
            'size and ratio cannot be specified at the same time')
    options = {'log_level': log_level}
    if size:
        if keep_ar:
            options['vf'] = (f'scale=w={size[0]}:h={size[1]}:'
                             'force_original_aspect_ratio=decrease')
        else:
            options['vf'] = f'scale={size[0]}:{size[1]}'
    else:
        # A scalar ratio applies uniformly to both dimensions.
        if not isinstance(ratio, tuple):
            ratio = (ratio, ratio)
        options['vf'] = f'scale="trunc(iw*{ratio[0]}):trunc(ih*{ratio[1]})"'
    convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg')
def cut_video(in_file,
              out_file,
              start=None,
              end=None,
              vcodec=None,
              acodec=None,
              log_level='info',
              print_cmd=False):
    """Cut a clip from a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        start (None or float): Start time (in seconds).
        end (None or float): End time (in seconds).
        vcodec (None or str): Output video codec, None for unchanged.
        acodec (None or str): Output audio codec, None for unchanged.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    options = {'log_level': log_level}
    # Default to stream copy (no re-encoding) when codecs are unspecified.
    for opt_key, codec in (('vcodec', vcodec), ('acodec', acodec)):
        if codec is None:
            options[opt_key] = 'copy'
    if start:
        options['ss'] = start
    else:
        start = 0
    if end:
        # ffmpeg takes a duration (-t), not an end timestamp.
        options['t'] = end - start
    convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg')
def concat_video(video_list,
                 out_file,
                 vcodec=None,
                 acodec=None,
                 log_level='info',
                 print_cmd=False):
    """Concatenate multiple videos into a single one.

    Args:
        video_list (list): A list of video filenames
        out_file (str): Output video filename
        vcodec (None or str): Output video codec, None for unchanged
        acodec (None or str): Output audio codec, None for unchanged
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    # ffmpeg's concat demuxer reads the input list from a text file.
    tmp_filehandler, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
    with open(tmp_filename, 'w') as f:
        f.writelines(f'file {osp.abspath(filename)}\n'
                     for filename in video_list)
    options = {'log_level': log_level}
    # Default to stream copy (no re-encoding) when codecs are unspecified.
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    convert_video(
        tmp_filename,
        out_file,
        print_cmd,
        pre_options='-f concat -safe 0',
        **options)
    os.close(tmp_filehandler)
    os.remove(tmp_filename)
class Color(Enum):
    """An enum that defines common colors.

    Contains red, green, blue, cyan, yellow, magenta, white and black.
    Values are 3-tuples in BGR channel order (note e.g. ``red`` is
    ``(0, 0, 255)``), matching what :func:`color_val` returns.
    """
    red = (0, 0, 255)
    green = (0, 255, 0)
    blue = (255, 0, 0)
    cyan = (255, 255, 0)
    yellow = (0, 255, 255)
    magenta = (255, 0, 255)
    white = (255, 255, 255)
    black = (0, 0, 0)
def color_val(color):
    """Convert various input to color tuples.

    Args:
        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs

    Returns:
        tuple[int]: A tuple of 3 integers indicating BGR channels.
    """
    # A color name string is looked up in the Color enum.
    if is_str(color):
        return Color[color].value
    if isinstance(color, Color):
        return color.value
    if isinstance(color, tuple):
        assert len(color) == 3
        assert all(0 <= channel <= 255 for channel in color)
        return color
    if isinstance(color, int):
        # A single int becomes a gray level.
        assert 0 <= color <= 255
        return (color, color, color)
    if isinstance(color, np.ndarray):
        assert color.ndim == 1 and color.size == 3
        assert np.all((color >= 0) & (color <= 255))
        return tuple(color.astype(np.uint8))
    raise TypeError(f'Invalid type for color: {type(color)}')
def choose_requirement(primary, secondary):
    """If some version of primary requirement installed, return primary, else
    return secondary."""
    # Strip any version operators to get the bare distribution name.
    pkg_name = re.split(r'[!<>=]', primary)[0]
    try:
        get_distribution(pkg_name)
    except DistributionNotFound:
        return secondary
    return str(primary)
def get_version():
    """Read ``__version__`` from mmcv/version.py without importing mmcv."""
    version_file = 'mmcv/version.py'
    namespace = {}
    with open(version_file, 'r', encoding='utf-8') as f:
        # Execute the version file in an isolated namespace and pull the
        # version string out of it.
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def parse_requirements(fname='requirements/runtime.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # "-r other.txt": recurse into the referenced file.
            target = line.split(' ')[1]
            yield from parse_require_file(target)
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Split on the first version operator, keeping the operator.
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # An environment marker follows the version.
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                # Skip blanks and comments.
                if line and not line.startswith('#'):
                    yield from parse_line(line)

    def gen_packages_items():
        if not exists(require_fpath):
            return
        for info in parse_require_file(require_fpath):
            parts = [info['package']]
            if with_version and 'version' in info:
                parts.extend(info['version'])
            if not sys.version.startswith('3.4'):
                # Environment markers are unsupported on Python 3.4.
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    parts.append(';' + platform_deps)
            yield ''.join(parts)

    return list(gen_packages_items())
def get_extensions():
    """Assemble the list of C++/CUDA extension modules to build.

    Behavior is driven by environment variables:

    - ``MMCV_WITH_TRT``: build the deprecated TensorRT plugin extension.
    - ``MMCV_WITH_OPS``: when '0', skip building the main op extension.
    - ``MMCV_WITH_ORT``: build the deprecated ONNXRuntime extension.
    - ``FORCE_CUDA`` / ``MMCV_CUDA_ARGS`` / ``TENSORRT_DIR`` /
      ``ONNXRUNTIME_DIR``: control CUDA usage and external SDK locations.

    Returns:
        list: ``setuptools``/torch ``Extension`` objects for setup().
    """
    extensions = []
    if os.getenv('MMCV_WITH_TRT', '0') != '0':
        # --- Deprecated TensorRT plugin extension (mmcv._ext_trt) ---
        # ANSI escape codes make the deprecation warning stand out.
        bright_style, reset_style = '\x1b[1m', '\x1b[0m'
        red_text, blue_text = '\x1b[31m', '\x1b[34m'
        white_background = '\x1b[107m'
        msg = white_background + bright_style + red_text
        msg += 'DeprecationWarning: ' + \
               'Custom TensorRT Ops will be deprecated in future. '
        msg += blue_text + \
               'Welcome to use the unified model deployment toolbox '
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        # Locate the TensorRT SDK from the TENSORRT_DIR env var.
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        tensorrt_lib_path = glob.glob(
            os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)
        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
        # -fno-gnu-unique avoids symbol uniqueness issues when the plugin
        # library is dlopen'ed.
        extra_compile_args['nvcc'] += ['-Xcompiler=-fno-gnu-unique']
        library_dirs += library_paths(cuda=True)
        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)
    if os.getenv('MMCV_WITH_OPS', '0') == '0':
        # Ops build disabled: only the (optional) TRT extension is returned.
        return extensions
    if EXT_TYPE == 'parrots':
        # --- Main op extension built against the parrots backend ---
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        # new parrots version add compile ext_files
        define_macros = []
        include_dirs = []
        op_files = glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
            glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
            glob.glob('./mmcv/ops/csrc/parrots/*.cpp')
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {
            'nvcc': [cuda_args, '-std=c++14'] if cuda_args else ['-std=c++14'],
            'cxx': ['-std=c++14'],
        }
        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            # Disable half-precision operator overloads that clash with
            # parrots' own definitions.
            extra_compile_args['nvcc'] += [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            cuda=True,
            pytorch=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        # --- Main op extension built against PyTorch ---
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension
        # Cap parallel compile jobs to the available CPUs (min 4) unless the
        # user already set MAX_JOBS. cpu_affinity is unavailable on some
        # platforms, hence the AttributeError fallback.
        try:
            import psutil
            num_cpu = len(psutil.Process().cpu_affinity())
            cpu_use = max(4, num_cpu - 1)
        except (ModuleNotFoundError, AttributeError):
            cpu_use = 4
        os.environ.setdefault('MAX_JOBS', str(cpu_use))
        define_macros = []
        extra_compile_args = {'cxx': []}
        # MSVC does not accept the -std flag in this form.
        if platform.system() != 'Windows':
            extra_compile_args['cxx'] = ['-std=c++14']
        include_dirs = []
        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = True if ((torch.version.hip is not None) and
                                       (ROCM_HOME is not None)) else False
        except ImportError:
            pass
        project_dir = 'mmcv/ops/csrc/'
        if is_rocm_pytorch:
            # ROCm build: translate CUDA sources to HIP in place first.
            from torch.utils.hipify import hipify_python
            hipify_python.hipify(
                project_directory=project_dir,
                output_directory=project_dir,
                includes='mmcv/ops/csrc/*',
                show_detailed=True,
                is_pytorch_extension=True)
            define_macros += [('MMCV_WITH_CUDA', None)]
            define_macros += [('HIP_DIFF', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/hip/*') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/hip/*')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/hip'))
        elif torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            # CUDA build (or forced CUDA cross-compile without a GPU).
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        else:
            # CPU-only build.
            print(f'Compiling {ext_name} without CUDA')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        if 'nvcc' in extra_compile_args and platform.system() != 'Windows':
            extra_compile_args['nvcc'] += ['-std=c++14']
        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)
    if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
        # --- Deprecated ONNXRuntime extension (mmcv._ext_ort) ---
        # ANSI escape codes make the deprecation warning stand out.
        bright_style, reset_style = '\x1b[1m', '\x1b[0m'
        red_text, blue_text = '\x1b[31m', '\x1b[34m'
        white_background = '\x1b[107m'
        msg = white_background + bright_style + red_text
        msg += 'DeprecationWarning: ' + \
               'Custom ONNXRuntime Ops will be deprecated in future. '
        msg += blue_text + \
               'Welcome to use the unified model deployment toolbox '
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_ort'
        import onnxruntime
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        # Locate the ONNXRuntime SDK from the ONNXRUNTIME_DIR env var.
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))
        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if onnxruntime.get_device() == 'GPU' or \
                os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            include_dirs += include_paths(cuda=True)
            library_dirs += library_paths(cuda=True)
        else:
            include_dirs += include_paths(cuda=False)
            library_dirs += library_paths(cuda=False)
        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)
    return extensions
def test_quantize():
    """quantize() should map [-1, 1] onto integer levels and validate args."""
    arr = np.random.randn(10, 10)
    levels = 20
    qarr = mmcv.quantize(arr, -1, 1, levels)
    assert qarr.shape == arr.shape
    assert qarr.dtype == np.dtype('int64')
    # Compare against a hand-computed reference for every element.
    for i, j in np.ndindex(*arr.shape):
        clipped = max(min(arr[i, j], 1), -1)
        ref = min(levels - 1, int(np.floor(10 * (1 + clipped))))
        assert qarr[i, j] == ref
    qarr = mmcv.quantize(arr, -1, 1, 20, dtype=np.uint8)
    assert qarr.shape == arr.shape
    assert qarr.dtype == np.dtype('uint8')
    # Invalid arguments must be rejected.
    with pytest.raises(ValueError):
        mmcv.quantize(arr, -1, 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.quantize(arr, -1, 1, levels=10.0)
    with pytest.raises(ValueError):
        mmcv.quantize(arr, 2, 1, levels)
def test_dequantize():
    """Check mmcv.dequantize output shape, dtype, values and error handling."""
    levels = 20
    qarr = np.random.randint(levels, size=(10, 10))

    # default output dtype is float64; each bin maps to its center value
    arr = mmcv.dequantize(qarr, (- 1), 1, levels)
    assert arr.shape == qarr.shape
    assert arr.dtype == np.dtype('float64')
    for i in range(qarr.shape[0]):
        for j in range(qarr.shape[1]):
            assert arr[i, j] == ((qarr[i, j] + 0.5) / 10) - 1

    # custom output dtype
    arr = mmcv.dequantize(qarr, (- 1), 1, levels, dtype=np.float32)
    assert arr.shape == qarr.shape
    assert arr.dtype == np.dtype('float32')

    # Invalid arguments must raise. Fixed to pass the quantized array `qarr`
    # (the original passed the float output `arr` by mistake; the ValueError
    # fires on the invalid levels/bounds either way, but the quantized input
    # matches the function's contract and mirrors test_quantize).
    with pytest.raises(ValueError):
        mmcv.dequantize(qarr, (- 1), 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.dequantize(qarr, (- 1), 1, levels=10.0)
    with pytest.raises(ValueError):
        mmcv.dequantize(qarr, 2, 1, levels)
def test_joint():
    """Round-tripping quantize -> dequantize recovers values up to bin width."""
    levels = 1000
    data = np.random.randn(100, 100)
    recovered = mmcv.dequantize(
        mmcv.quantize(data, (- 1), 1, levels), (- 1), 1, levels)
    # out-of-range inputs saturate to the outermost bin centers (+/-0.999)
    assert np.abs(recovered[data < (- 1)] + 0.999).max() < 1e-06
    assert np.abs(recovered[data > 1] - 0.999).max() < 1e-06
    # in-range values are recovered within half a bin width
    in_range = (data >= (- 1)) & (data <= 1)
    assert np.abs((recovered - data)[in_range]).max() <= 0.001

    # tiny values all collapse into the central (zero-centered) bin of an
    # odd-sized coarse grid
    levels = 99
    data = np.clip(np.random.randn(100) / 1000, (- 0.01), 0.01)
    recovered = mmcv.dequantize(
        mmcv.quantize(data, (- 1), 1, levels), (- 1), 1, levels)
    assert np.all(recovered == 0)
def test_build_conv_layer():
    """build_conv_layer validates cfg and instantiates registered conv layers."""
    # cfg must be a dict (or None)
    with pytest.raises(TypeError):
        build_conv_layer('Conv2d')
    # cfg must contain a 'type' key
    with pytest.raises(KeyError):
        build_conv_layer(dict(kernel_size=3))
    # unregistered conv type
    with pytest.raises(KeyError):
        build_conv_layer(dict(type='FancyConv'))

    kwargs = dict(
        in_channels=4, out_channels=8, kernel_size=3, groups=2, dilation=2)

    def _check_attrs(layer):
        # shared attribute assertions for the explicitly tested conv types
        assert layer.in_channels == kwargs['in_channels']
        assert layer.out_channels == kwargs['out_channels']
        assert layer.kernel_size == (kwargs['kernel_size'],
                                     kwargs['kernel_size'])
        assert layer.groups == kwargs['groups']
        assert layer.dilation == (kwargs['dilation'], kwargs['dilation'])

    # None cfg falls back to a plain nn.Conv2d
    layer = build_conv_layer(None, **kwargs)
    assert isinstance(layer, nn.Conv2d)
    _check_attrs(layer)

    # explicit 'Conv' type also yields nn.Conv2d
    layer = build_conv_layer(dict(type='Conv'), **kwargs)
    assert isinstance(layer, nn.Conv2d)
    _check_attrs(layer)

    # 'deconv' yields a transposed convolution
    layer = build_conv_layer(dict(type='deconv'), **kwargs)
    assert isinstance(layer, nn.ConvTranspose2d)
    _check_attrs(layer)

    # sweep every registered conv type; `groups` is dropped for this sweep
    kwargs.pop('groups')
    for type_name, module in CONV_LAYERS.module_dict.items():
        if type_name in ('SparseInverseConv2d', 'SparseInverseConv3d'):
            # sparse inverse convs take no dilation argument
            kwargs.pop('dilation')
        layer = build_conv_layer(dict(type=type_name), **kwargs)
        assert isinstance(layer, module)
        assert layer.in_channels == kwargs['in_channels']
        assert layer.out_channels == kwargs['out_channels']
        kwargs['dilation'] = 2  # restore for the next iteration
def test_infer_norm_abbr():
    """infer_norm_abbr maps norm classes to their canonical abbreviations."""
    # non-class input is rejected
    with pytest.raises(TypeError):
        infer_norm_abbr(0)

    # an explicit _abbr_ attribute wins over name inference
    MyNorm = type('MyNorm', (), {'_abbr_': 'mn'})
    assert infer_norm_abbr(MyNorm) == 'mn'

    # otherwise the abbreviation is inferred from the class name
    # (class names matter here, so they match the original test exactly)
    expected = {
        'FancyBatchNorm': 'bn',
        'FancyInstanceNorm': 'in',
        'FancyLayerNorm': 'ln',
        'FancyGroupNorm': 'gn',
        'FancyNorm': 'norm_layer',
    }
    for cls_name, abbr in expected.items():
        assert infer_norm_abbr(type(cls_name, (), {})) == abbr
def test_build_norm_layer():
    """build_norm_layer validates cfg and returns (name, layer) pairs."""
    # cfg must be a dict
    with pytest.raises(TypeError):
        build_norm_layer('BN', 3)
    # cfg must contain a 'type' key
    with pytest.raises(KeyError):
        build_norm_layer(dict(), 3)
    # unregistered norm type
    with pytest.raises(KeyError):
        build_norm_layer(dict(type='FancyNorm'), 3)
    # postfix must be an int or str
    with pytest.raises(AssertionError):
        build_norm_layer(dict(type='BN'), 3, postfix=[1, 2])
    # GN requires num_groups
    with pytest.raises(AssertionError):
        build_norm_layer(dict(type='GN'), 3)

    # expected name prefix for each registered norm type
    abbr_mapping = {
        'BN': 'bn',
        'BN1d': 'bn',
        'BN2d': 'bn',
        'BN3d': 'bn',
        'SyncBN': 'bn',
        'GN': 'gn',
        'LN': 'ln',
        'IN': 'in',
        'IN1d': 'in',
        'IN2d': 'in',
        'IN3d': 'in',
    }
    for type_name, module in NORM_LAYERS.module_dict.items():
        if type_name == 'MMSyncBN':  # intentionally skipped, as in the
            continue                 # original test
        for postfix in ('_test', 1):
            cfg = dict(type=type_name)
            if type_name == 'GN':
                cfg['num_groups'] = 2
            name, layer = build_norm_layer(cfg, 3, postfix=postfix)
            assert name == abbr_mapping[type_name] + str(postfix)
            assert isinstance(layer, module)
            if type_name == 'GN':
                assert layer.num_channels == 3
                assert layer.num_groups == cfg['num_groups']
            elif type_name != 'LN':
                assert layer.num_features == 3