code
stringlengths
17
6.64M
def is_rocm_pytorch() -> bool:
    """Return True when the installed PyTorch is a ROCm (HIP) build.

    Always False under parrots or when ``torch.utils.cpp_extension`` cannot
    be imported.
    """
    rocm = False
    if TORCH_VERSION != 'parrots':
        try:
            from torch.utils.cpp_extension import ROCM_HOME
        except ImportError:
            pass
        else:
            # Both the HIP runtime and a ROCm install must be present.
            rocm = torch.version.hip is not None and ROCM_HOME is not None
    return rocm
def _get_cuda_home():
    """Return the CUDA (or ROCm) installation root for the active backend."""
    if TORCH_VERSION == 'parrots':
        from parrots.utils.build_extension import CUDA_HOME
        return CUDA_HOME
    if is_rocm_pytorch():
        # On ROCm builds, ROCM_HOME plays the role of CUDA_HOME.
        from torch.utils.cpp_extension import ROCM_HOME
        return ROCM_HOME
    from torch.utils.cpp_extension import CUDA_HOME
    return CUDA_HOME
def get_build_config():
    """Return the build-configuration string of the underlying framework."""
    if TORCH_VERSION == 'parrots':
        from parrots.config import get_build_info
        return get_build_info()
    return torch.__config__.show()
def _get_conv():
    """Return the backend's ``(_ConvNd, _ConvTransposeMixin)`` base classes."""
    if TORCH_VERSION == 'parrots':
        from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    else:
        from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    return _ConvNd, _ConvTransposeMixin
def _get_dataloader():
    """Return ``(DataLoader, PoolDataLoader)``.

    On plain PyTorch there is no ``PoolDataLoader``, so ``DataLoader`` is
    returned for both slots.
    """
    if TORCH_VERSION == 'parrots':
        from torch.utils.data import DataLoader, PoolDataLoader
    else:
        from torch.utils.data import DataLoader
        PoolDataLoader = DataLoader
    return DataLoader, PoolDataLoader
def _get_extension():
    """Return ``(BuildExtension, CppExtension, CUDAExtension)`` for the backend."""
    if TORCH_VERSION == 'parrots':
        from parrots.utils.build_extension import BuildExtension, Extension
        # parrots exposes a single Extension; specialize it via the cuda flag.
        CppExtension = partial(Extension, cuda=False)
        CUDAExtension = partial(Extension, cuda=True)
    else:
        from torch.utils.cpp_extension import (BuildExtension, CppExtension,
                                               CUDAExtension)
    return BuildExtension, CppExtension, CUDAExtension
def _get_pool():
    """Return the backend's pooling base classes.

    Returns:
        tuple: ``(_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd,
        _MaxPoolNd)``.
    """
    if TORCH_VERSION == 'parrots':
        from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
                                             _AdaptiveMaxPoolNd, _AvgPoolNd,
                                             _MaxPoolNd)
    else:
        from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
                                              _AdaptiveMaxPoolNd, _AvgPoolNd,
                                              _MaxPoolNd)
    return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm():
    """Return ``(_BatchNorm, _InstanceNorm, SyncBatchNorm_)`` for the backend."""
    if TORCH_VERSION == 'parrots':
        from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
        SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
    else:
        from torch.nn.modules.batchnorm import _BatchNorm
        from torch.nn.modules.instancenorm import _InstanceNorm
        SyncBatchNorm_ = torch.nn.SyncBatchNorm
    return _BatchNorm, _InstanceNorm, SyncBatchNorm_
class SyncBatchNorm(SyncBatchNorm_):
    """Backend-agnostic SyncBatchNorm with a unified input-dim check."""

    def _check_input_dim(self, input):
        # Non-parrots backends already implement the check.
        if TORCH_VERSION != 'parrots':
            super()._check_input_dim(input)
            return
        if input.dim() < 2:
            raise ValueError(
                f'expected at least 2D input (got {input.dim()}D input)')
def is_filepath(x):
    """Return True if ``x`` is a string path or a ``pathlib.Path``."""
    if isinstance(x, Path):
        return True
    return is_str(x)
def fopen(filepath, *args, **kwargs):
    """Open ``filepath`` whether given as a string or a ``Path``.

    Extra positional/keyword arguments are forwarded to the underlying
    ``open`` call.

    Raises:
        ValueError: If ``filepath`` is neither a string nor a ``Path``.
    """
    if isinstance(filepath, Path):
        return filepath.open(*args, **kwargs)
    if is_str(filepath):
        return open(filepath, *args, **kwargs)
    raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
    """Raise ``FileNotFoundError`` unless ``filename`` is an existing file.

    Args:
        filename (str): Path to check.
        msg_tmpl (str): Message template; ``{}`` is filled with ``filename``.
    """
    if not osp.isfile(filename):
        raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=511):
    """Create ``dir_name`` (and missing parents) if it does not exist.

    An empty string is silently ignored. ``mode`` defaults to 511 (0o777).
    """
    if dir_name == '':
        return
    expanded = osp.expanduser(dir_name)
    os.makedirs(expanded, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
    """Create a symlink ``dst`` pointing at ``src``.

    An existing ``dst`` is removed first when ``overwrite`` is True.
    """
    if overwrite and os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
    """Scan a directory to find the interested files.

    Args:
        dir_path (str | :obj:`Path`): Path of the directory.
        suffix (str | tuple(str), optional): File suffix that we are
            interested in. Default: None.
        recursive (bool, optional): If set to True, recursively scan the
            directory. Default: False.
        case_sensitive (bool, optional): If set to False, ignore the case of
            suffix. Default: True.

    Returns:
        A generator for all the interested files with relative paths.
    """
    if not isinstance(dir_path, (str, Path)):
        raise TypeError('"dir_path" must be a string or Path object')
    dir_path = str(dir_path)
    if suffix is not None and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')
    if suffix is not None and not case_sensitive:
        if isinstance(suffix, str):
            suffix = suffix.lower()
        else:
            suffix = tuple(item.lower() for item in suffix)
    root = dir_path

    def _walk(current):
        for entry in os.scandir(current):
            # Hidden files are skipped; hidden directories are still recursed.
            if not entry.name.startswith('.') and entry.is_file():
                rel_path = osp.relpath(entry.path, root)
                candidate = rel_path if case_sensitive else rel_path.lower()
                if suffix is None or candidate.endswith(suffix):
                    yield rel_path
            elif recursive and os.path.isdir(entry.path):
                yield from _walk(entry.path)

    # Validation above runs eagerly; only the walk itself is lazy.
    return _walk(dir_path)
def find_vcs_root(path, markers=('.git',)):
    """Find the closest ancestor directory (including itself) with a marker.

    Args:
        path (str): Path of a directory or file (files use their directory).
        markers (list[str], optional): File or directory names to look for.

    Returns:
        str | None: The directory containing one of the markers, or None.
    """
    if osp.isfile(path):
        path = osp.dirname(path)
    cur = osp.abspath(osp.expanduser(path))
    prev = None
    # Walk upwards until the path stops changing (filesystem root reached).
    while cur != prev:
        if any(osp.exists(osp.join(cur, marker)) for marker in markers):
            return cur
        prev, cur = cur, osp.split(cur)[0]
    return None
class ProgressBar():
    """A progress bar which can print the progress."""

    def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout):
        # task_num == 0 means the total is unknown; only a counter is shown.
        self.task_num = task_num
        self.bar_width = bar_width
        self.completed = 0
        self.file = file
        if start:
            self.start()

    @property
    def terminal_width(self):
        # Queried live so a resized terminal is respected on each update.
        (width, _) = get_terminal_size()
        return width

    def start(self):
        """Print the initial (empty) bar and start the internal timer."""
        if (self.task_num > 0):
            self.file.write(f"[{(' ' * self.bar_width)}] 0/{self.task_num}, elapsed: 0s, ETA:")
        else:
            self.file.write('completed: 0, elapsed: 0s')
        self.file.flush()
        self.timer = Timer()

    def update(self, num_tasks=1):
        """Advance the bar by ``num_tasks`` completed tasks and redraw it."""
        assert (num_tasks > 0)
        self.completed += num_tasks
        elapsed = self.timer.since_start()
        if (elapsed > 0):
            fps = (self.completed / elapsed)
        else:
            # Guard against division by zero on the very first update.
            fps = float('inf')
        if (self.task_num > 0):
            percentage = (self.completed / float(self.task_num))
            # ETA: remaining fraction extrapolated from the elapsed time.
            eta = int((((elapsed * (1 - percentage)) / percentage) + 0.5))
            msg = f''' [{{}}] {self.completed}/{self.task_num}, {fps:.1f} task/s, elapsed: {int((elapsed + 0.5))}s, ETA: {eta:5}s'''
            # Shrink the bar so the whole message fits the terminal width.
            bar_width = min(self.bar_width, (int((self.terminal_width - len(msg))) + 2), int((self.terminal_width * 0.6)))
            bar_width = max(2, bar_width)
            mark_width = int((bar_width * percentage))
            bar_chars = (('>' * mark_width) + (' ' * (bar_width - mark_width)))
            self.file.write(msg.format(bar_chars))
        else:
            # Unknown total: just report the running counter and rate.
            self.file.write(f'completed: {self.completed}, elapsed: {int((elapsed + 0.5))}s, {fps:.1f} tasks/s')
        self.file.flush()
def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs):
    """Track the progress of tasks execution with a progress bar.

    Tasks are processed sequentially with a plain for-loop.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        bar_width (int): Width of progress bar.

    Returns:
        list: The task results.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        tasks, task_num = tasks[0], tasks[1]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError(
            '"tasks" must be an iterable object or a (iterator, int) tuple')
    prog_bar = ProgressBar(task_num, bar_width, file=file)
    results = []
    for task in tasks:
        results.append(func(task, **kwargs))
        prog_bar.update()
    prog_bar.file.write('\n')
    return results
def init_pool(process_num, initializer=None, initargs=None):
    """Create a ``multiprocessing.Pool``, validating ``initargs``.

    Raises:
        TypeError: If ``initargs`` is given but is not a tuple.
    """
    if initializer is None:
        return Pool(process_num)
    if initargs is None:
        return Pool(process_num, initializer)
    if not isinstance(initargs, tuple):
        raise TypeError('"initargs" must be a tuple')
    return Pool(process_num, initializer, initargs)
def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True, file=sys.stdout):
    """Track the progress of parallel task execution with a progress bar.

    The built-in :mod:`multiprocessing` module is used for process pools and
    tasks are done with :func:`Pool.imap` or :func:`Pool.imap_unordered`.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        nproc (int): Process (worker) number.
        initializer (None or callable): Refer to
            :class:`multiprocessing.Pool` for details.
        initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
            details.
        chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
        bar_width (int): Width of progress bar.
        skip_first (bool): Whether to skip the first sample for each worker
            when estimating fps, since the initialization step may take
            longer.
        keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
            :func:`Pool.imap_unordered` is used.

    Returns:
        list: The task results.
    """
    if isinstance(tasks, tuple):
        assert (len(tasks) == 2)
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    pool = init_pool(nproc, initializer, initargs)
    # When skipping the warm-up batch, delay starting the bar and exclude
    # that batch (nproc * chunksize results) from the task count.
    start = (not skip_first)
    task_num -= ((nproc * chunksize) * int(skip_first))
    prog_bar = ProgressBar(task_num, bar_width, start, file=file)
    results = []
    if keep_order:
        gen = pool.imap(func, tasks, chunksize)
    else:
        gen = pool.imap_unordered(func, tasks, chunksize)
    for result in gen:
        results.append(result)
        if skip_first:
            # Don't update the bar during the warm-up batch; start timing
            # exactly when the batch completes so the fps estimate is clean.
            if (len(results) < (nproc * chunksize)):
                continue
            elif (len(results) == (nproc * chunksize)):
                prog_bar.start()
                continue
        prog_bar.update()
    prog_bar.file.write('\n')
    pool.close()
    pool.join()
    return results
def track_iter_progress(tasks, bar_width=50, file=sys.stdout):
    """Track the progress of tasks iteration or enumeration with a progress
    bar.

    Tasks are yielded one by one from a simple for-loop.

    Args:
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        bar_width (int): Width of progress bar.

    Yields:
        list: The task results.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        tasks, task_num = tasks[0], tasks[1]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError(
            '"tasks" must be an iterable object or a (iterator, int) tuple')
    prog_bar = ProgressBar(task_num, bar_width, file=file)
    for task in tasks:
        yield task
        # The bar advances after the consumer has processed the item.
        prog_bar.update()
    prog_bar.file.write('\n')
def build_from_cfg(cfg, registry, default_args=None):
    """Build a module from config dict.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        registry (:obj:`Registry`): The registry to search the type from.
        default_args (dict, optional): Default initialization arguments.

    Returns:
        object: The constructed object.

    Raises:
        TypeError: If ``cfg``/``registry``/``default_args`` have wrong types.
        KeyError: If no "type" is given or the type is not registered.
    """
    if (not isinstance(cfg, dict)):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    # "type" may come from cfg itself or be supplied via default_args.
    if ('type' not in cfg):
        if ((default_args is None) or ('type' not in default_args)):
            raise KeyError(f'''`cfg` or `default_args` must contain the key "type", but got {cfg} {default_args}''')
    if (not isinstance(registry, Registry)):
        raise TypeError(f'registry must be an mmcv.Registry object, but got {type(registry)}')
    if (not (isinstance(default_args, dict) or (default_args is None))):
        raise TypeError(f'default_args must be a dict or None, but got {type(default_args)}')
    # Work on a copy so the caller's cfg dict is never mutated.
    args = cfg.copy()
    if (default_args is not None):
        # Defaults fill in only the keys cfg did not set explicitly.
        for (name, value) in default_args.items():
            args.setdefault(name, value)
    obj_type = args.pop('type')
    if isinstance(obj_type, str):
        obj_cls = registry.get(obj_type)
        if (obj_cls is None):
            raise KeyError(f'{obj_type} is not in the {registry.name} registry')
    elif inspect.isclass(obj_type):
        # A class may be passed directly instead of a registered name.
        obj_cls = obj_type
    else:
        raise TypeError(f'type must be a str or valid type, but got {type(obj_type)}')
    try:
        return obj_cls(**args)
    except Exception as e:
        # Re-raise with the class name prefixed for easier debugging.
        raise type(e)(f'{obj_cls.__name__}: {e}')
class Registry():
    """A registry to map strings to classes.

    Registered object could be built from registry.

    Example:
        >>> MODELS = Registry('models')
        >>> @MODELS.register_module()
        >>> class ResNet:
        >>>     pass
        >>> resnet = MODELS.build(dict(type='ResNet'))

    Args:
        name (str): Registry name.
        build_func (func, optional): Build function to construct instance
            from Registry; :func:`build_from_cfg` is used if neither
            ``parent`` nor ``build_func`` is specified. If ``parent`` is
            specified and ``build_func`` is not given, ``build_func`` will
            be inherited from ``parent``. Default: None.
        parent (Registry, optional): Parent registry. The class registered in
            children registry could be built from parent. Default: None.
        scope (str, optional): The scope of registry. It is the key to search
            for children registry. If not specified, scope will be the name
            of the package where class is defined, e.g. mmdet, mmcls, mmseg.
            Default: None.
    """

    def __init__(self, name, build_func=None, parent=None, scope=None):
        self._name = name
        self._module_dict = dict()
        self._children = dict()
        # Scope defaults to the caller's top-level package name.
        self._scope = (self.infer_scope() if (scope is None) else scope)
        # build_func priority: explicit argument > parent's > build_from_cfg.
        if (build_func is None):
            if (parent is not None):
                self.build_func = parent.build_func
            else:
                self.build_func = build_from_cfg
        else:
            self.build_func = build_func
        if (parent is not None):
            assert isinstance(parent, Registry)
            parent._add_children(self)
            self.parent = parent
        else:
            self.parent = None

    def __len__(self):
        return len(self._module_dict)

    def __contains__(self, key):
        return (self.get(key) is not None)

    def __repr__(self):
        format_str = (self.__class__.__name__ + f'(name={self._name}, items={self._module_dict})')
        return format_str

    @staticmethod
    def infer_scope():
        """Infer the scope of registry from the caller's package name.

        Example:
            >>> # in mmdet/models/backbone/resnet.py
            >>> MODELS = Registry('models')
            The scope of ``MODELS`` will be ``mmdet``.

        Returns:
            str: The inferred scope name.
        """
        # stack()[2] is the frame that called Registry.__init__; its module
        # name's first component is the enclosing package.
        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
        split_filename = filename.split('.')
        return split_filename[0]

    @staticmethod
    def split_scope_key(key):
        """Split the first scope from a key.

        Examples:
            >>> Registry.split_scope_key('mmdet.ResNet')
            'mmdet', 'ResNet'
            >>> Registry.split_scope_key('ResNet')
            None, 'ResNet'

        Return:
            tuple[str | None, str]: The former element is the first scope of
            the key, which can be ``None``. The latter is the remaining key.
        """
        split_index = key.find('.')
        if (split_index != (- 1)):
            return (key[:split_index], key[(split_index + 1):])
        else:
            return (None, key)

    @property
    def name(self):
        return self._name

    @property
    def scope(self):
        return self._scope

    @property
    def module_dict(self):
        return self._module_dict

    @property
    def children(self):
        return self._children

    def get(self, key):
        """Get the registry record.

        Args:
            key (str): The class name in string format, optionally prefixed
                with a scope (e.g. ``'mmdet.ResNet'``).

        Returns:
            class: The corresponding class, or None if not found in this
            registry or its matching child.
        """
        (scope, real_key) = self.split_scope_key(key)
        if ((scope is None) or (scope == self._scope)):
            # Look up in this registry itself.
            if (real_key in self._module_dict):
                return self._module_dict[real_key]
        elif (scope in self._children):
            # Delegate to the child registry owning that scope.
            return self._children[scope].get(real_key)
        else:
            # Unknown scope: retry from the root of the registry tree.
            parent = self.parent
            while (parent.parent is not None):
                parent = parent.parent
            return parent.get(key)

    def build(self, *args, **kwargs):
        return self.build_func(*args, **kwargs, registry=self)

    def _add_children(self, registry):
        """Add a child registry, keyed by its scope.

        Example:
            >>> models = Registry('models')
            >>> mmdet_models = Registry('models', parent=models)
            >>> @mmdet_models.register_module()
            >>> class ResNet:
            >>>     pass
            >>> resnet = models.build(dict(type='mmdet.ResNet'))
        """
        assert isinstance(registry, Registry)
        assert (registry.scope is not None)
        assert (registry.scope not in self.children), f'scope {registry.scope} exists in {self.name} registry'
        self.children[registry.scope] = registry

    def _register_module(self, module_class, module_name=None, force=False):
        # Register ``module_class`` under one or several names; ``force``
        # allows overwriting an existing record.
        if (not inspect.isclass(module_class)):
            raise TypeError(f'module must be a class, but got {type(module_class)}')
        if (module_name is None):
            module_name = module_class.__name__
        if isinstance(module_name, str):
            module_name = [module_name]
        for name in module_name:
            if ((not force) and (name in self._module_dict)):
                raise KeyError(f'{name} is already registered in {self.name}')
            self._module_dict[name] = module_class

    def deprecated_register_module(self, cls=None, force=False):
        # Backward-compatible shim for the old register_module(module) API.
        warnings.warn('The old API of register_module(module, force=False) is deprecated and will be removed, please use the new API register_module(name=None, force=False, module=None) instead.', DeprecationWarning)
        if (cls is None):
            return partial(self.deprecated_register_module, force=force)
        self._register_module(cls, force=force)
        return cls

    def register_module(self, name=None, force=False, module=None):
        """Register a module.

        A record will be added to ``self._module_dict``, whose key is the
        class name or the specified name, and value is the class itself.
        It can be used as a decorator or a normal function.

        Example:
            >>> backbones = Registry('backbone')
            >>> @backbones.register_module()
            >>> class ResNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> @backbones.register_module(name='mnet')
            >>> class MobileNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> class ResNet:
            >>>     pass
            >>> backbones.register_module(ResNet)

        Args:
            name (str | None): The module name to be registered. If not
                specified, the class name will be used.
            force (bool, optional): Whether to override an existing class
                with the same name. Default: False.
            module (type): Module class to be registered.
        """
        if (not isinstance(force, bool)):
            raise TypeError(f'force must be a boolean, but got {type(force)}')
        # A class passed positionally means the deprecated old-style call.
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)
        if (not ((name is None) or isinstance(name, str) or is_seq_of(name, str))):
            raise TypeError(f'name must be either of None, an instance of str or a sequence of str, but got {type(name)}')
        # Plain-function usage: register the given module immediately.
        if (module is not None):
            self._register_module(module_class=module, module_name=name, force=force)
            return module

        # Decorator usage: return a closure that registers the class.
        def _register(cls):
            self._register_module(module_class=cls, module_name=name, force=force)
            return cls
        return _register
def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
    """Seed numpy/random/torch for a dataloader worker.

    The seed of each worker equals
    ``num_workers * rank + worker_id + seed``.

    Args:
        worker_id (int): Id for each worker.
        num_workers (int): Number of workers.
        rank (int): Rank in distributed training.
        seed (int): Random seed.
    """
    worker_seed = num_workers * rank + worker_id + seed
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(worker_seed)
def check_python_script(cmd):
    """Run a python command line with ``__main__`` in the current process.

    Unlike ``os.system`` this executes the script in-process, so coverage
    tools can track it. Two forms are supported:

    - ./tests/data/scripts/hello.py zz
    - python tests/data/scripts/hello.py zz
    """
    args = split(cmd)
    # Drop a leading "python" so args[0] is always the script path.
    if args[0] == 'python':
        args = args[1:]
    with patch.object(sys, 'argv', args):
        run_path(args[0], run_name='__main__')
def _any(judge_result): 'Since built-in ``any`` works only when the element of iterable is not\n iterable, implement the function.' if (not isinstance(judge_result, Iterable)): return judge_result try: for element in judge_result: if _any(element): return True except TypeError: if judge_result: return True return False
def assert_dict_contains_subset(dict_obj: Dict[Any, Any], expected_subset: Dict[Any, Any]) -> bool:
    """Check if ``dict_obj`` contains every key/value of ``expected_subset``.

    Args:
        dict_obj (Dict[Any, Any]): Dict object to be checked.
        expected_subset (Dict[Any, Any]): Subset expected to be contained in
            dict_obj.

    Returns:
        bool: Whether dict_obj contains the expected_subset.
    """
    return all(
        key in dict_obj and not _any(dict_obj[key] != value)
        for key, value in expected_subset.items())
def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool:
    """Check if the attributes of ``obj`` match ``expected_attrs``.

    Args:
        obj (object): Class object to be checked.
        expected_attrs (Dict[str, Any]): Dict of the expected attrs.

    Returns:
        bool: Whether every expected attribute exists and matches.
    """
    return all(
        hasattr(obj, attr) and not _any(getattr(obj, attr) != value)
        for attr, value in expected_attrs.items())
def assert_dict_has_keys(obj: Dict[str, Any], expected_keys: List[str]) -> bool:
    """Check if ``obj`` has all of ``expected_keys``.

    Args:
        obj (Dict[str, Any]): Object to be checked.
        expected_keys (List[str]): Keys expected to be contained in the keys
            of the obj.

    Returns:
        bool: Whether the obj has the expected keys.
    """
    return all(key in obj for key in expected_keys)
def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool:
    """Check if ``result_keys`` and ``target_keys`` hold the same key set.

    Args:
        result_keys (List[str]): Result keys to be checked.
        target_keys (List[str]): Target keys to be checked.

    Returns:
        bool: Whether the two key collections are set-equal.
    """
    # Equal sets have an empty symmetric difference.
    return not set(result_keys) ^ set(target_keys)
def assert_is_norm_layer(module) -> bool:
    """Check if ``module`` is a normalization layer.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the module is a norm layer (batch/instance/group/layer
        norm).
    """
    from torch.nn import GroupNorm, LayerNorm

    from .parrots_wrapper import _BatchNorm, _InstanceNorm
    candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)
    return isinstance(module, candidates)
def assert_params_all_zeros(module) -> bool:
    """Check if the parameters of ``module`` are all zeros.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether both weight and (if present) bias are all zeros.
    """
    def _all_zero(tensor):
        # allclose against a fresh zero tensor of the same shape/dtype.
        return tensor.allclose(tensor.new_zeros(tensor.size()))

    if not _all_zero(module.weight.data):
        return False
    if hasattr(module, 'bias') and module.bias is not None:
        return _all_zero(module.bias.data)
    return True
class TimerError(Exception):
    """Raised when a :class:`Timer` operation is used while not running."""

    def __init__(self, message):
        # Keep the message available as an attribute too.
        self.message = message
        super().__init__(message)
class Timer():
    """A flexible timer, usable directly or as a context manager.

    Examples:
        >>> import time
        >>> import mmcv
        >>> with mmcv.Timer():
        >>>     # simulate a code block that will run for 1s
        >>>     time.sleep(1)
        1.000
        >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
        >>>     # simulate a code block that will run for 1s
        >>>     time.sleep(1)
        it takes 1.0 seconds
        >>> timer = mmcv.Timer()
        >>> time.sleep(0.5)
        >>> print(timer.since_start())
        0.500
        >>> time.sleep(0.5)
        >>> print(timer.since_last_check())
        0.500
        >>> print(timer.since_start())
        1.000
    """

    def __init__(self, start=True, print_tmpl=None):
        self._is_running = False
        self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
        if start:
            self.start()

    @property
    def is_running(self):
        """bool: indicate whether the timer is running"""
        return self._is_running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        # Report the elapsed time of the with-block, then stop.
        print(self.print_tmpl.format(self.since_last_check()))
        self._is_running = False

    def start(self):
        """Start the timer (restarting only resets the last-check mark)."""
        if not self._is_running:
            self._t_start = time()
            self._is_running = True
        self._t_last = time()

    def since_start(self):
        """Total time since the timer was started.

        Returns:
            float: Time in seconds.

        Raises:
            TimerError: If the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        self._t_last = time()
        return self._t_last - self._t_start

    def since_last_check(self):
        """Time since the last checking operation.

        Either :func:`since_start` or :func:`since_last_check` counts as a
        checking operation.

        Returns:
            float: Time in seconds.

        Raises:
            TimerError: If the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        dur = time() - self._t_last
        self._t_last = time()
        return dur
def check_time(timer_id):
    """Add check points in a single line.

    This method is suitable for running a task on a list of items. A timer
    will be registered when the method is called for the first time.

    Examples:
        >>> import time
        >>> import mmcv
        >>> for i in range(1, 6):
        >>>     # simulate a code block
        >>>     time.sleep(i)
        >>>     mmcv.check_time('task1')
        2.000
        3.000
        4.000
        5.000

    Args:
        timer_id (str): Timer identifier.

    Returns:
        float | int: Seconds since the previous check, or 0 on first call.
    """
    if timer_id in _g_timers:
        return _g_timers[timer_id].since_last_check()
    # First call for this id: register a fresh timer.
    _g_timers[timer_id] = Timer()
    return 0
def is_jit_tracing() -> bool:
    """Return whether the code is executing under ``torch.jit`` tracing.

    Only supported on torch >= 1.6; on older versions (or parrots) a warning
    is emitted and False is returned.
    """
    if (torch.__version__ != 'parrots'
            and digit_version(torch.__version__) >= digit_version('1.6.0')):
        on_trace = torch.jit.is_tracing()
        # Some versions return a bool, others a private tracing state object.
        if isinstance(on_trace, bool):
            return on_trace
        return torch._C._is_tracing()
    warnings.warn(
        'torch.jit.is_tracing is only supported after v1.6.0. Therefore is_tracing returns False automatically. Please set on_trace manually if you are using trace.',
        UserWarning)
    return False
def digit_version(version_str: str, length: int = 4):
    """Convert a version string into a tuple of integers.

    This method is usually used for comparing two versions. For pre-release
    versions: alpha < beta < rc.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int]: The version info in digits (integers).
    """
    # Imported locally so this helper is self-contained, consistent with
    # the sibling parse_version_info().
    from packaging.version import parse
    assert 'parrots' not in version_str
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)
    release = release[:length]
    if len(release) < length:
        # Pad short versions (e.g. "1.2") with zeros.
        release = release + [0] * (length - len(release))
    if version.is_prerelease:
        # Encode pre-release markers as negative numbers so that
        # a(-3) < b(-2) < rc(-1) < final(0).
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        if version.pre:
            if version.pre[0] not in mapping:
                warnings.warn(f'unknown prerelease version {version.pre[0]}, version checking may go wrong')
            else:
                val = mapping[version.pre[0]]
            release.extend([val, version.pre[-1]])
        else:
            release.extend([val, 0])
    elif version.is_postrelease:
        # Post-releases sort after the corresponding final release.
        release.extend([1, version.post])
    else:
        release.extend([0, 0])
    return tuple(release)
def _minimal_ext_cmd(cmd): env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if (v is not None): env[k] = v env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out
def get_git_hash(fallback='unknown', digits=None):
    """Get the git hash of the current repo.

    Args:
        fallback (str, optional): The fallback string when git hash is
            unavailable. Defaults to 'unknown'.
        digits (int, optional): Kept digits of the hash. Defaults to None,
            meaning all digits are kept.

    Returns:
        str: Git commit hash.
    """
    if digits is not None and not isinstance(digits, int):
        raise TypeError('digits must be None or an integer')
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        # git not installed / not a repo: fall back gracefully.
        return fallback
    return sha if digits is None else sha[:digits]
def parse_version_info(version_str: str, length: int = 4) -> tuple:
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
        (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
        (2, 0, 0, 0, 'rc', 1) (when length is set to 4).
    """
    from packaging.version import parse
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)
    release = release[:length]
    if len(release) < length:
        # Pad short versions (e.g. "1.2") with zeros.
        release = release + [0] * (length - len(release))
    if version.is_prerelease:
        # version.pre is a (marker, number) tuple, e.g. ('rc', 1).
        release.extend(list(version.pre))
    elif version.is_postrelease:
        # Fix: version.post is a plain int, so list(version.post) raised
        # TypeError for versions like "1.0.post1". Mirror the prerelease
        # branch's (marker, number) encoding instead.
        release.extend(['post', version.post])
    else:
        release.extend([0, 0])
    return tuple(release)
class Cache():
    """A bounded FIFO key/value cache.

    The oldest entry is evicted once ``capacity`` items are stored. Putting
    an already-present key is a no-op (the stored value is kept).
    """

    def __init__(self, capacity):
        self._cache = OrderedDict()
        self._capacity = int(capacity)
        if capacity <= 0:
            raise ValueError('capacity must be a positive integer')

    @property
    def capacity(self):
        """int: maximum number of entries the cache can hold."""
        return self._capacity

    @property
    def size(self):
        """int: number of entries currently stored."""
        return len(self._cache)

    def put(self, key, val):
        """Store ``val`` under ``key``, evicting the oldest entry when full."""
        if key in self._cache:
            return
        while len(self._cache) >= self._capacity:
            # popitem(last=False) removes the oldest insertion.
            self._cache.popitem(last=False)
        self._cache[key] = val

    def get(self, key, default=None):
        """Return the value stored under ``key``, or ``default`` if absent."""
        return self._cache.get(key, default)
class VideoReader():
    """Video class with similar usage to a list object.

    This video wrapper class provides convenient apis to access frames.
    There exists an issue of OpenCV's VideoCapture class that jumping to a
    certain frame may be inaccurate. It is fixed in this class by checking
    the position after jumping each time.
    Cache is used when decoding videos. So if the same frame is visited for
    the second time, there is no need to decode again if it is stored in the
    cache.

    Examples:
        >>> import mmcv
        >>> v = mmcv.VideoReader('sample.mp4')
        >>> len(v)  # get the total frame number with `len()`
        120
        >>> for img in v:  # v is iterable
        >>>     mmcv.imshow(img)
        >>> v[5]  # get the 6th frame
    """

    def __init__(self, filename, cache_capacity=10):
        # URLs are passed straight to OpenCV; only local paths are checked.
        if (not filename.startswith(('https://', 'http://'))):
            check_file_exist(filename, ('Video file not found: ' + filename))
        self._vcap = cv2.VideoCapture(filename)
        assert (cache_capacity > 0)
        self._cache = Cache(cache_capacity)
        # Index of the next frame to decode (0-based).
        self._position = 0
        # Static video metadata read once from the capture object.
        self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))
        self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))
        self._fps = self._vcap.get(CAP_PROP_FPS)
        self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))
        self._fourcc = self._vcap.get(CAP_PROP_FOURCC)

    @property
    def vcap(self):
        """:obj:`cv2.VideoCapture`: The raw VideoCapture object."""
        return self._vcap

    @property
    def opened(self):
        """bool: Indicate whether the video is opened."""
        return self._vcap.isOpened()

    @property
    def width(self):
        """int: Width of video frames."""
        return self._width

    @property
    def height(self):
        """int: Height of video frames."""
        return self._height

    @property
    def resolution(self):
        """tuple: Video resolution (width, height)."""
        return (self._width, self._height)

    @property
    def fps(self):
        """float: FPS of the video."""
        return self._fps

    @property
    def frame_cnt(self):
        """int: Total frames of the video."""
        return self._frame_cnt

    @property
    def fourcc(self):
        """str: "Four character code" of the video."""
        return self._fourcc

    @property
    def position(self):
        """int: Current cursor position, indicating frame decoded."""
        return self._position

    def _get_real_position(self):
        # Position as reported by OpenCV (may drift from self._position).
        return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))

    def _set_real_position(self, frame_id):
        # Seek, then step frame-by-frame to compensate for inaccurate seeks.
        self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)
        pos = self._get_real_position()
        for _ in range((frame_id - pos)):
            self._vcap.read()
        self._position = frame_id

    def read(self):
        """Read the next frame.

        If the next frame have been decoded before and in the cache, then
        return it directly, otherwise decode, cache and return it.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        if self._cache:
            img = self._cache.get(self._position)
            if (img is not None):
                ret = True
            else:
                # Re-sync the real decoder position before decoding.
                if (self._position != self._get_real_position()):
                    self._set_real_position(self._position)
                (ret, img) = self._vcap.read()
                if ret:
                    self._cache.put(self._position, img)
        else:
            (ret, img) = self._vcap.read()
        if ret:
            self._position += 1
        return img

    def get_frame(self, frame_id):
        """Get frame by index.

        Args:
            frame_id (int): Index of the expected frame, 0-based.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        if ((frame_id < 0) or (frame_id >= self._frame_cnt)):
            raise IndexError(f'"frame_id" must be between 0 and {(self._frame_cnt - 1)}')
        if (frame_id == self._position):
            return self.read()
        if self._cache:
            img = self._cache.get(frame_id)
            if (img is not None):
                self._position = (frame_id + 1)
                return img
        self._set_real_position(frame_id)
        (ret, img) = self._vcap.read()
        if ret:
            if self._cache:
                self._cache.put(self._position, img)
            self._position += 1
        return img

    def current_frame(self):
        """Get the current frame (frame that is just visited).

        Returns:
            ndarray or None: If the video is fresh, return None, otherwise
            return the frame.
        """
        if (self._position == 0):
            return None
        return self._cache.get((self._position - 1))

    def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True):
        """Convert a video to frame images.

        Args:
            frame_dir (str): Output directory to store all the frame images.
            file_start (int): Filenames will start from the specified number.
            filename_tmpl (str): Filename template with the index as the
                placeholder.
            start (int): The starting frame index.
            max_num (int): Maximum number of frames to be written.
            show_progress (bool): Whether to show a progress bar.
        """
        mkdir_or_exist(frame_dir)
        if (max_num == 0):
            task_num = (self.frame_cnt - start)
        else:
            task_num = min((self.frame_cnt - start), max_num)
        if (task_num <= 0):
            raise ValueError('start must be less than total frame number')
        if (start > 0):
            self._set_real_position(start)

        def write_frame(file_idx):
            # Decode the next frame and write it under the templated name.
            img = self.read()
            if (img is None):
                return
            filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
            cv2.imwrite(filename, img)
        if show_progress:
            track_progress(write_frame, range(file_start, (file_start + task_num)))
        else:
            for i in range(task_num):
                write_frame((file_start + i))

    def __len__(self):
        return self.frame_cnt

    def __getitem__(self, index):
        # Slices return a list of frames; negative indices wrap like lists.
        if isinstance(index, slice):
            return [self.get_frame(i) for i in range(*index.indices(self.frame_cnt))]
        if (index < 0):
            index += self.frame_cnt
            if (index < 0):
                raise IndexError('index out of range')
        return self.get_frame(index)

    def __iter__(self):
        self._set_real_position(0)
        return self

    def __next__(self):
        img = self.read()
        if (img is not None):
            return img
        else:
            raise StopIteration
    # Python 2 style alias kept for backward compatibility.
    next = __next__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._vcap.release()
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True): 'Read the frame images from a directory and join them as a video.\n\n Args:\n frame_dir (str): The directory containing video frames.\n video_file (str): Output filename.\n fps (float): FPS of the output video.\n fourcc (str): Fourcc of the output video, this should be compatible\n with the output file type.\n filename_tmpl (str): Filename template with the index as the variable.\n start (int): Starting frame index.\n end (int): Ending frame index.\n show_progress (bool): Whether to show a progress bar.\n ' if (end == 0): ext = filename_tmpl.split('.')[(- 1)] end = len([name for name in scandir(frame_dir, ext)]) first_file = osp.join(frame_dir, filename_tmpl.format(start)) check_file_exist(first_file, ('The start frame not found: ' + first_file)) img = cv2.imread(first_file) (height, width) = img.shape[:2] resolution = (width, height) vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, resolution) def write_frame(file_idx): filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) img = cv2.imread(filename) vwriter.write(img) if show_progress: track_progress(write_frame, range(start, end)) else: for i in range(start, end): write_frame(i) vwriter.release()
@requires_executable('ffmpeg') def convert_video(in_file, out_file, print_cmd=False, pre_options='', **kwargs): 'Convert a video with ffmpeg.\n\n This provides a general api to ffmpeg, the executed command is::\n\n `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`\n\n Options(kwargs) are mapped to ffmpeg commands with the following rules:\n\n - key=val: "-key val"\n - key=True: "-key"\n - key=False: ""\n\n Args:\n in_file (str): Input video filename.\n out_file (str): Output video filename.\n pre_options (str): Options appears before "-i <in_file>".\n print_cmd (bool): Whether to print the final ffmpeg command.\n ' options = [] for (k, v) in kwargs.items(): if isinstance(v, bool): if v: options.append(f'-{k}') elif (k == 'log_level'): assert (v in ['quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace']) options.append(f'-loglevel {v}') else: options.append(f'-{k} {v}') cmd = f"ffmpeg -y {pre_options} -i {in_file} {' '.join(options)} {out_file}" if print_cmd: print(cmd) subprocess.call(cmd, shell=True)
@requires_executable('ffmpeg') def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False): 'Resize a video.\n\n Args:\n in_file (str): Input video filename.\n out_file (str): Output video filename.\n size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).\n ratio (tuple or float): Expected resize ratio, (2, 0.5) means\n (w*2, h*0.5).\n keep_ar (bool): Whether to keep original aspect ratio.\n log_level (str): Logging level of ffmpeg.\n print_cmd (bool): Whether to print the final ffmpeg command.\n ' if ((size is None) and (ratio is None)): raise ValueError('expected size or ratio must be specified') if ((size is not None) and (ratio is not None)): raise ValueError('size and ratio cannot be specified at the same time') options = {'log_level': log_level} if size: if (not keep_ar): options['vf'] = f'scale={size[0]}:{size[1]}' else: options['vf'] = f'scale=w={size[0]}:h={size[1]}:force_original_aspect_ratio=decrease' else: if (not isinstance(ratio, tuple)): ratio = (ratio, ratio) options['vf'] = f'scale="trunc(iw*{ratio[0]}):trunc(ih*{ratio[1]})"' convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg') def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False): 'Cut a clip from a video.\n\n Args:\n in_file (str): Input video filename.\n out_file (str): Output video filename.\n start (None or float): Start time (in seconds).\n end (None or float): End time (in seconds).\n vcodec (None or str): Output video codec, None for unchanged.\n acodec (None or str): Output audio codec, None for unchanged.\n log_level (str): Logging level of ffmpeg.\n print_cmd (bool): Whether to print the final ffmpeg command.\n ' options = {'log_level': log_level} if (vcodec is None): options['vcodec'] = 'copy' if (acodec is None): options['acodec'] = 'copy' if start: options['ss'] = start else: start = 0 if end: options['t'] = (end - start) convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg') def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False): 'Concatenate multiple videos into a single one.\n\n Args:\n video_list (list): A list of video filenames\n out_file (str): Output video filename\n vcodec (None or str): Output video codec, None for unchanged\n acodec (None or str): Output audio codec, None for unchanged\n log_level (str): Logging level of ffmpeg.\n print_cmd (bool): Whether to print the final ffmpeg command.\n ' (tmp_filehandler, tmp_filename) = tempfile.mkstemp(suffix='.txt', text=True) with open(tmp_filename, 'w') as f: for filename in video_list: f.write(f'''file {osp.abspath(filename)} ''') options = {'log_level': log_level} if (vcodec is None): options['vcodec'] = 'copy' if (acodec is None): options['acodec'] = 'copy' convert_video(tmp_filename, out_file, print_cmd, pre_options='-f concat -safe 0', **options) os.close(tmp_filehandler) os.remove(tmp_filename)
class Color(Enum): 'An enum that defines common colors.\n\n Contains red, green, blue, cyan, yellow, magenta, white and black.\n ' red = (0, 0, 255) green = (0, 255, 0) blue = (255, 0, 0) cyan = (255, 255, 0) yellow = (0, 255, 255) magenta = (255, 0, 255) white = (255, 255, 255) black = (0, 0, 0)
def color_val(color): 'Convert various input to color tuples.\n\n Args:\n color (:obj:`Color`/str/tuple/int/ndarray): Color inputs\n\n Returns:\n tuple[int]: A tuple of 3 integers indicating BGR channels.\n ' if is_str(color): return Color[color].value elif isinstance(color, Color): return color.value elif isinstance(color, tuple): assert (len(color) == 3) for channel in color: assert (0 <= channel <= 255) return color elif isinstance(color, int): assert (0 <= color <= 255) return (color, color, color) elif isinstance(color, np.ndarray): assert ((color.ndim == 1) and (color.size == 3)) assert np.all(((color >= 0) & (color <= 255))) color = color.astype(np.uint8) return tuple(color) else: raise TypeError(f'Invalid type for color: {type(color)}')
def choose_requirement(primary, secondary): 'If some version of primary requirement installed, return primary, else\n return secondary.' try: name = re.split('[!<>=]', primary)[0] get_distribution(name) except DistributionNotFound: return secondary return str(primary)
def get_version(): version_file = 'mmcv/version.py' with open(version_file, 'r', encoding='utf-8') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__']
def parse_requirements(fname='requirements/runtime.txt', with_version=True): 'Parse the package dependencies listed in a requirements file but strips\n specific versioning information.\n\n Args:\n fname (str): path to requirements file\n with_version (bool, default=False): if True include version specs\n\n Returns:\n List[str]: list of requirements items\n\n CommandLine:\n python -c "import setup; print(setup.parse_requirements())"\n ' import sys from os.path import exists require_fpath = fname def parse_line(line): 'Parse information from a line in a requirements text file.' if line.startswith('-r '): target = line.split(' ')[1] for info in parse_require_file(target): (yield info) else: info = {'line': line} if line.startswith('-e '): info['package'] = line.split('#egg=')[1] else: pat = (('(' + '|'.join(['>=', '==', '>'])) + ')') parts = re.split(pat, line, maxsplit=1) parts = [p.strip() for p in parts] info['package'] = parts[0] if (len(parts) > 1): (op, rest) = parts[1:] if (';' in rest): (version, platform_deps) = map(str.strip, rest.split(';')) info['platform_deps'] = platform_deps else: version = rest info['version'] = (op, version) (yield info) def parse_require_file(fpath): with open(fpath, 'r') as f: for line in f.readlines(): line = line.strip() if (line and (not line.startswith('#'))): for info in parse_line(line): (yield info) def gen_packages_items(): if exists(require_fpath): for info in parse_require_file(require_fpath): parts = [info['package']] if (with_version and ('version' in info)): parts.extend(info['version']) if (not sys.version.startswith('3.4')): platform_deps = info.get('platform_deps') if (platform_deps is not None): parts.append((';' + platform_deps)) item = ''.join(parts) (yield item) packages = list(gen_packages_items()) return packages
def get_extensions():
    """Assemble the list of C++/CUDA extension modules to build.

    Extensions are selected via environment variables: ``MMCV_WITH_TRT``
    (deprecated TensorRT plugins), ``MMCV_WITH_OPS`` (the main op library,
    built for either parrots or pytorch depending on ``EXT_TYPE``) and
    ``MMCV_WITH_ORT`` (deprecated ONNXRuntime ops).

    Returns:
        list: setuptools/torch ``Extension`` objects to pass to ``setup()``.
    """
    extensions = []
    # --- Optional TensorRT plugin extension (deprecated) -----------------
    if (os.getenv('MMCV_WITH_TRT', '0') != '0'):
        # ANSI escape sequences to make the deprecation warning stand out.
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += ('DeprecationWarning: ' + 'Custom TensorRT Ops will be deprecated in future. ')
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        # TENSORRT_DIR must point at a TensorRT installation.
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        tensorrt_lib_path = glob.glob(os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)
        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        # Extra nvcc flags supplied by the user, if any.
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
        extra_compile_args['nvcc'] += ['-Xcompiler=-fno-gnu-unique']
        library_dirs += library_paths(cuda=True)
        from setuptools import Extension
        ext_ops = Extension(name=ext_name, sources=op_files, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, language='c++', library_dirs=library_dirs, libraries=libraries)
        extensions.append(ext_ops)
    # Without MMCV_WITH_OPS the op library itself is not built.
    if (os.getenv('MMCV_WITH_OPS', '0') == '0'):
        return extensions
    # --- Main op extension, parrots flavor -------------------------------
    if (EXT_TYPE == 'parrots'):
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        define_macros = []
        include_dirs = []
        op_files = ((glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')) + glob.glob('./mmcv/ops/csrc/parrots/*.cpp'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {'nvcc': ([cuda_args, '-std=c++14'] if cuda_args else ['-std=c++14']), 'cxx': ['-std=c++14']}
        if (torch.cuda.is_available() or (os.getenv('FORCE_CUDA', '0') == '1')):
            define_macros += [('MMCV_WITH_CUDA', None)]
            # Disable half-precision operator overloads in device code.
            extra_compile_args['nvcc'] += ['-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
        ext_ops = Extension(name=ext_name, sources=op_files, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, cuda=True, pytorch=True)
        extensions.append(ext_ops)
    # --- Main op extension, pytorch flavor (ROCm / CUDA / CPU) -----------
    elif (EXT_TYPE == 'pytorch'):
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension
        # Cap parallel compile jobs via MAX_JOBS (unless already set);
        # psutil may be missing or cpu_affinity unsupported on some OSes.
        try:
            import psutil
            num_cpu = len(psutil.Process().cpu_affinity())
            cpu_use = max(4, (num_cpu - 1))
        except (ModuleNotFoundError, AttributeError):
            cpu_use = 4
        os.environ.setdefault('MAX_JOBS', str(cpu_use))
        define_macros = []
        extra_compile_args = {'cxx': []}
        if (platform.system() != 'Windows'):
            extra_compile_args['cxx'] = ['-std=c++14']
        include_dirs = []
        # Detect a ROCm build of pytorch (hip + ROCM_HOME both present).
        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = (True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False)
        except ImportError:
            pass
        project_dir = 'mmcv/ops/csrc/'
        if is_rocm_pytorch:
            # Transpile CUDA sources to HIP in place before building.
            from torch.utils.hipify import hipify_python
            hipify_python.hipify(project_directory=project_dir, output_directory=project_dir, includes='mmcv/ops/csrc/*', show_detailed=True, is_pytorch_extension=True)
            define_macros += [('MMCV_WITH_CUDA', None)]
            define_macros += [('HIP_DIFF', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
            op_files = (glob.glob('./mmcv/ops/csrc/pytorch/hip/*') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/hip/*'))
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/hip'))
        elif (torch.cuda.is_available() or (os.getenv('FORCE_CUDA', '0') == '1')):
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
            op_files = (((glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')) + glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu')) + glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp'))
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        else:
            # CPU-only build.
            print(f'Compiling {ext_name} without CUDA')
            op_files = (glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp'))
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        if (('nvcc' in extra_compile_args) and (platform.system() != 'Windows')):
            extra_compile_args['nvcc'] += ['-std=c++14']
        ext_ops = extension(name=ext_name, sources=op_files, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)
    # --- Optional ONNXRuntime op extension (deprecated) ------------------
    if ((EXT_TYPE == 'pytorch') and (os.getenv('MMCV_WITH_ORT', '0') != '0')):
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += ('DeprecationWarning: ' + 'Custom ONNXRuntime Ops will be deprecated in future. ')
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_ort'
        import onnxruntime
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        # ONNXRUNTIME_DIR must point at an onnxruntime installation.
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))
        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if ((onnxruntime.get_device() == 'GPU') or (os.getenv('FORCE_CUDA', '0') == '1')):
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            include_dirs += include_paths(cuda=True)
            library_dirs += library_paths(cuda=True)
        else:
            include_dirs += include_paths(cuda=False)
            library_dirs += library_paths(cuda=False)
        from setuptools import Extension
        ext_ops = Extension(name=ext_name, sources=op_files, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, language='c++', library_dirs=library_dirs, libraries=libraries)
        extensions.append(ext_ops)
    return extensions
def test_quantize(): arr = np.random.randn(10, 10) levels = 20 qarr = mmcv.quantize(arr, (- 1), 1, levels) assert (qarr.shape == arr.shape) assert (qarr.dtype == np.dtype('int64')) for i in range(arr.shape[0]): for j in range(arr.shape[1]): ref = min((levels - 1), int(np.floor((10 * (1 + max(min(arr[(i, j)], 1), (- 1))))))) assert (qarr[(i, j)] == ref) qarr = mmcv.quantize(arr, (- 1), 1, 20, dtype=np.uint8) assert (qarr.shape == arr.shape) assert (qarr.dtype == np.dtype('uint8')) with pytest.raises(ValueError): mmcv.quantize(arr, (- 1), 1, levels=0) with pytest.raises(ValueError): mmcv.quantize(arr, (- 1), 1, levels=10.0) with pytest.raises(ValueError): mmcv.quantize(arr, 2, 1, levels)
def test_dequantize(): levels = 20 qarr = np.random.randint(levels, size=(10, 10)) arr = mmcv.dequantize(qarr, (- 1), 1, levels) assert (arr.shape == qarr.shape) assert (arr.dtype == np.dtype('float64')) for i in range(qarr.shape[0]): for j in range(qarr.shape[1]): assert (arr[(i, j)] == (((qarr[(i, j)] + 0.5) / 10) - 1)) arr = mmcv.dequantize(qarr, (- 1), 1, levels, dtype=np.float32) assert (arr.shape == qarr.shape) assert (arr.dtype == np.dtype('float32')) with pytest.raises(ValueError): mmcv.dequantize(arr, (- 1), 1, levels=0) with pytest.raises(ValueError): mmcv.dequantize(arr, (- 1), 1, levels=10.0) with pytest.raises(ValueError): mmcv.dequantize(arr, 2, 1, levels)
def test_joint(): arr = np.random.randn(100, 100) levels = 1000 qarr = mmcv.quantize(arr, (- 1), 1, levels) recover = mmcv.dequantize(qarr, (- 1), 1, levels) assert (np.abs((recover[(arr < (- 1))] + 0.999)).max() < 1e-06) assert (np.abs((recover[(arr > 1)] - 0.999)).max() < 1e-06) assert (np.abs((recover - arr)[((arr >= (- 1)) & (arr <= 1))]).max() <= 0.001) arr = np.clip((np.random.randn(100) / 1000), (- 0.01), 0.01) levels = 99 qarr = mmcv.quantize(arr, (- 1), 1, levels) recover = mmcv.dequantize(qarr, (- 1), 1, levels) assert np.all((recover == 0))
def test_build_conv_layer(): with pytest.raises(TypeError): cfg = 'Conv2d' build_conv_layer(cfg) with pytest.raises(KeyError): cfg = dict(kernel_size=3) build_conv_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyConv') build_conv_layer(cfg) kwargs = dict(in_channels=4, out_channels=8, kernel_size=3, groups=2, dilation=2) cfg = None layer = build_conv_layer(cfg, **kwargs) assert isinstance(layer, nn.Conv2d) assert (layer.in_channels == kwargs['in_channels']) assert (layer.out_channels == kwargs['out_channels']) assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size'])) assert (layer.groups == kwargs['groups']) assert (layer.dilation == (kwargs['dilation'], kwargs['dilation'])) cfg = dict(type='Conv') layer = build_conv_layer(cfg, **kwargs) assert isinstance(layer, nn.Conv2d) assert (layer.in_channels == kwargs['in_channels']) assert (layer.out_channels == kwargs['out_channels']) assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size'])) assert (layer.groups == kwargs['groups']) assert (layer.dilation == (kwargs['dilation'], kwargs['dilation'])) cfg = dict(type='deconv') layer = build_conv_layer(cfg, **kwargs) assert isinstance(layer, nn.ConvTranspose2d) assert (layer.in_channels == kwargs['in_channels']) assert (layer.out_channels == kwargs['out_channels']) assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size'])) assert (layer.groups == kwargs['groups']) assert (layer.dilation == (kwargs['dilation'], kwargs['dilation'])) kwargs.pop('groups') for (type_name, module) in CONV_LAYERS.module_dict.items(): cfg = dict(type=type_name) if ((type_name == 'SparseInverseConv2d') or (type_name == 'SparseInverseConv3d')): kwargs.pop('dilation') layer = build_conv_layer(cfg, **kwargs) assert isinstance(layer, module) assert (layer.in_channels == kwargs['in_channels']) assert (layer.out_channels == kwargs['out_channels']) kwargs['dilation'] = 2
def test_infer_norm_abbr(): with pytest.raises(TypeError): infer_norm_abbr(0) class MyNorm(): _abbr_ = 'mn' assert (infer_norm_abbr(MyNorm) == 'mn') class FancyBatchNorm(): pass assert (infer_norm_abbr(FancyBatchNorm) == 'bn') class FancyInstanceNorm(): pass assert (infer_norm_abbr(FancyInstanceNorm) == 'in') class FancyLayerNorm(): pass assert (infer_norm_abbr(FancyLayerNorm) == 'ln') class FancyGroupNorm(): pass assert (infer_norm_abbr(FancyGroupNorm) == 'gn') class FancyNorm(): pass assert (infer_norm_abbr(FancyNorm) == 'norm_layer')
def test_build_norm_layer(): with pytest.raises(TypeError): cfg = 'BN' build_norm_layer(cfg, 3) with pytest.raises(KeyError): cfg = dict() build_norm_layer(cfg, 3) with pytest.raises(KeyError): cfg = dict(type='FancyNorm') build_norm_layer(cfg, 3) with pytest.raises(AssertionError): cfg = dict(type='BN') build_norm_layer(cfg, 3, postfix=[1, 2]) with pytest.raises(AssertionError): cfg = dict(type='GN') build_norm_layer(cfg, 3) abbr_mapping = {'BN': 'bn', 'BN1d': 'bn', 'BN2d': 'bn', 'BN3d': 'bn', 'SyncBN': 'bn', 'GN': 'gn', 'LN': 'ln', 'IN': 'in', 'IN1d': 'in', 'IN2d': 'in', 'IN3d': 'in'} for (type_name, module) in NORM_LAYERS.module_dict.items(): if (type_name == 'MMSyncBN'): continue for postfix in ['_test', 1]: cfg = dict(type=type_name) if (type_name == 'GN'): cfg['num_groups'] = 2 (name, layer) = build_norm_layer(cfg, 3, postfix=postfix) assert (name == (abbr_mapping[type_name] + str(postfix))) assert isinstance(layer, module) if (type_name == 'GN'): assert (layer.num_channels == 3) assert (layer.num_groups == cfg['num_groups']) elif (type_name != 'LN'): assert (layer.num_features == 3)
def test_build_activation_layer(): with pytest.raises(TypeError): cfg = 'ReLU' build_activation_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_activation_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyReLU') build_activation_layer(cfg) for (type_name, module) in ACTIVATION_LAYERS.module_dict.items(): cfg['type'] = type_name layer = build_activation_layer(cfg) assert isinstance(layer, module) act = build_activation_layer(dict(type='Clamp')) x = (torch.randn(10) * 1000) y = act(x) assert np.logical_and((y >= (- 1)).numpy(), (y <= 1).numpy()).all() act = build_activation_layer(dict(type='Clip', min=0)) y = act(x) assert np.logical_and((y >= 0).numpy(), (y <= 1).numpy()).all() act = build_activation_layer(dict(type='Clamp', max=0)) y = act(x) assert np.logical_and((y >= (- 1)).numpy(), (y <= 0).numpy()).all()
def test_build_padding_layer(): with pytest.raises(TypeError): cfg = 'reflect' build_padding_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_padding_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyPad') build_padding_layer(cfg) for (type_name, module) in PADDING_LAYERS.module_dict.items(): cfg['type'] = type_name layer = build_padding_layer(cfg, 2) assert isinstance(layer, module) input_x = torch.randn(1, 2, 5, 5) cfg = dict(type='reflect') padding_layer = build_padding_layer(cfg, 2) res = padding_layer(input_x) assert (res.shape == (1, 2, 9, 9))
def test_upsample_layer(): with pytest.raises(TypeError): cfg = 'bilinear' build_upsample_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_upsample_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyUpsample') build_upsample_layer(cfg) for type_name in ['nearest', 'bilinear']: cfg['type'] = type_name layer = build_upsample_layer(cfg) assert isinstance(layer, nn.Upsample) assert (layer.mode == type_name) cfg = dict(type='deconv', in_channels=3, out_channels=3, kernel_size=3, stride=2) layer = build_upsample_layer(cfg) assert isinstance(layer, nn.ConvTranspose2d) cfg = dict(type='deconv') kwargs = dict(in_channels=3, out_channels=3, kernel_size=3, stride=2) layer = build_upsample_layer(cfg, **kwargs) assert isinstance(layer, nn.ConvTranspose2d) assert (layer.in_channels == kwargs['in_channels']) assert (layer.out_channels == kwargs['out_channels']) assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size'])) assert (layer.stride == (kwargs['stride'], kwargs['stride'])) layer = build_upsample_layer(cfg, 3, 3, 3, 2) assert isinstance(layer, nn.ConvTranspose2d) assert (layer.in_channels == kwargs['in_channels']) assert (layer.out_channels == kwargs['out_channels']) assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size'])) assert (layer.stride == (kwargs['stride'], kwargs['stride'])) cfg = dict(type='pixel_shuffle', in_channels=3, out_channels=3, scale_factor=2, upsample_kernel=3) layer = build_upsample_layer(cfg) assert isinstance(layer, PixelShufflePack) assert (layer.scale_factor == 2) assert (layer.upsample_kernel == 3)
def test_pixel_shuffle_pack(): x_in = torch.rand(2, 3, 10, 10) pixel_shuffle = PixelShufflePack(3, 3, scale_factor=2, upsample_kernel=3) assert (pixel_shuffle.upsample_conv.kernel_size == (3, 3)) x_out = pixel_shuffle(x_in) assert (x_out.shape == (2, 3, 20, 20))
def test_is_norm(): norm_set1 = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm] norm_set2 = [nn.GroupNorm] for norm_type in norm_set1: layer = norm_type(3) assert is_norm(layer) assert (not is_norm(layer, exclude=(norm_type,))) for norm_type in norm_set2: layer = norm_type(3, 6) assert is_norm(layer) assert (not is_norm(layer, exclude=(norm_type,))) class MyNorm(nn.BatchNorm2d): pass layer = MyNorm(3) assert is_norm(layer) assert (not is_norm(layer, exclude=_BatchNorm)) assert (not is_norm(layer, exclude=(_BatchNorm,))) layer = nn.Conv2d(3, 8, 1) assert (not is_norm(layer)) with pytest.raises(TypeError): layer = nn.BatchNorm1d(3) is_norm(layer, exclude='BN') with pytest.raises(TypeError): layer = nn.BatchNorm1d(3) is_norm(layer, exclude=('BN',))
def test_infer_plugin_abbr(): with pytest.raises(TypeError): infer_plugin_abbr(0) class MyPlugin(): _abbr_ = 'mp' assert (infer_plugin_abbr(MyPlugin) == 'mp') class FancyPlugin(): pass assert (infer_plugin_abbr(FancyPlugin) == 'fancy_plugin')
def test_build_plugin_layer(): with pytest.raises(TypeError): cfg = 'Plugin' build_plugin_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_plugin_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyPlugin') build_plugin_layer(cfg) with pytest.raises(AssertionError): cfg = dict(type='ConvModule') build_plugin_layer(cfg, postfix=[1, 2]) for postfix in ['', '_test', 1]: cfg = dict(type='ContextBlock') (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16, ratio=(1.0 / 4)) assert (name == ('context_block' + str(postfix))) assert isinstance(layer, PLUGIN_LAYERS.module_dict['ContextBlock']) for postfix in ['', '_test', 1]: cfg = dict(type='GeneralizedAttention') (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16) assert (name == ('gen_attention_block' + str(postfix))) assert isinstance(layer, PLUGIN_LAYERS.module_dict['GeneralizedAttention']) for postfix in ['', '_test', 1]: cfg = dict(type='NonLocal2d') (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16) assert (name == ('nonlocal_block' + str(postfix))) assert isinstance(layer, PLUGIN_LAYERS.module_dict['NonLocal2d']) for postfix in ['', '_test', 1]: cfg = dict(type='ConvModule') (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16, out_channels=4, kernel_size=3) assert (name == ('conv_block' + str(postfix))) assert isinstance(layer, PLUGIN_LAYERS.module_dict['ConvModule'])
def test_context_block(): with pytest.raises(AssertionError): ContextBlock(16, (1.0 / 4), pooling_type='unsupport_type') with pytest.raises(AssertionError): ContextBlock(16, (1.0 / 4), fusion_types='unsupport_type') with pytest.raises(AssertionError): ContextBlock(16, (1.0 / 4), fusion_types=('unsupport_type',)) imgs = torch.randn(2, 16, 20, 20) context_block = ContextBlock(16, (1.0 / 4), pooling_type='att') out = context_block(imgs) assert (context_block.conv_mask.in_channels == 16) assert (context_block.conv_mask.out_channels == 1) assert (out.shape == imgs.shape) imgs = torch.randn(2, 16, 20, 20) context_block = ContextBlock(16, (1.0 / 4), pooling_type='avg') out = context_block(imgs) assert hasattr(context_block, 'avg_pool') assert (out.shape == imgs.shape) imgs = torch.randn(2, 16, 20, 20) context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_add',)) out = context_block(imgs) assert (context_block.channel_add_conv is not None) assert (context_block.channel_mul_conv is None) assert (out.shape == imgs.shape) imgs = torch.randn(2, 16, 20, 20) context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_mul',)) out = context_block(imgs) assert (context_block.channel_add_conv is None) assert (context_block.channel_mul_conv is not None) assert (out.shape == imgs.shape) imgs = torch.randn(2, 16, 20, 20) context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_add', 'channel_mul')) out = context_block(imgs) assert (context_block.channel_add_conv is not None) assert (context_block.channel_mul_conv is not None) assert (out.shape == imgs.shape)
def test_conv2d_samepadding(): inputs = torch.rand((1, 3, 28, 28)) conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1) output = conv(inputs) assert (output.shape == inputs.shape) inputs = torch.rand((1, 3, 13, 13)) conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1) output = conv(inputs) assert (output.shape == inputs.shape) inputs = torch.rand((1, 3, 28, 28)) conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2) output = conv(inputs) assert (output.shape == torch.Size([1, 3, 14, 14])) inputs = torch.rand((1, 3, 13, 13)) conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2) output = conv(inputs) assert (output.shape == torch.Size([1, 3, 7, 7]))
@CONV_LAYERS.register_module() class ExampleConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, norm_cfg=None): super(ExampleConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.bias = bias self.norm_cfg = norm_cfg self.output_padding = (0, 0, 0) self.transposed = False self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size) self.init_weights() def forward(self, x): x = self.conv0(x) return x def init_weights(self): nn.init.constant_(self.conv0.weight, 0)
def test_conv_module():
    """Validate ConvModule cfg checking and conv/norm/activation variants."""
    # conv_cfg must be None or a dict.
    with pytest.raises(AssertionError):
        conv_cfg = 'conv'
        ConvModule(3, 8, 2, conv_cfg=conv_cfg)
    # norm_cfg must be None or a dict.
    with pytest.raises(AssertionError):
        norm_cfg = 'norm'
        ConvModule(3, 8, 2, norm_cfg=norm_cfg)
    # 'softmax' is not a registered activation layer type.
    with pytest.raises(KeyError):
        act_cfg = dict(type='softmax')
        ConvModule(3, 8, 2, act_cfg=act_cfg)
    # conv + norm + act (default ReLU).
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert conv.with_norm
    assert hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # conv + act, no norm.
    conv = ConvModule(3, 8, 2)
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert (not conv.with_norm)
    assert (conv.norm is None)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # conv only: neither norm nor activation.
    conv = ConvModule(3, 8, 2, act_cfg=None)
    assert (not conv.with_norm)
    assert (conv.norm is None)
    assert (not conv.with_activation)
    assert (not hasattr(conv, 'activate'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Custom conv layer from the registry; ExampleConv zero-inits weights.
    conv_module = ConvModule(
        3, 8, 2, conv_cfg=dict(type='ExampleConv'), act_cfg=None)
    assert torch.equal(conv_module.conv.conv0.weight, torch.zeros(8, 3, 2, 2))
    # Spectral norm wraps the weight as `weight_orig`.
    conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Non-default padding modes go through an explicit padding layer.
    conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Unknown padding mode -> KeyError from the padding registry.
    with pytest.raises(KeyError):
        conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')
    # Alternative activation layers.
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert isinstance(conv.activate, nn.LeakyReLU)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Tanh'))
    assert isinstance(conv.activate, nn.Tanh)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Sigmoid'))
    assert isinstance(conv.activate, nn.Sigmoid)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='PReLU'))
    assert isinstance(conv.activate, nn.PReLU)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # HSwish falls back to mmcv's implementation before torch 1.7
    # (nn.Hardswish only exists from 1.7 on).
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSwish'))
    if ((TORCH_VERSION == 'parrots')
            or (digit_version(TORCH_VERSION) < digit_version('1.7'))):
        assert isinstance(conv.activate, HSwish)
    else:
        assert isinstance(conv.activate, nn.Hardswish)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSigmoid'))
    assert isinstance(conv.activate, HSigmoid)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
def test_bias():
    """ConvModule drops the conv bias automatically before batch/instance norm."""
    # bias='auto' resolves to True when no norm layer follows.
    assert ConvModule(3, 8, 2).conv.bias is not None
    # With BN the automatic bias is removed.
    assert ConvModule(3, 8, 2, norm_cfg=dict(type='BN')).conv.bias is None
    # An explicit bias=False is always honoured.
    assert ConvModule(3, 8, 2, bias=False).conv.bias is None

    # Forcing bias=True before BN or IN emits exactly one warning.
    for norm_type in ('BN', 'IN'):
        with pytest.warns(UserWarning) as record:
            ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type=norm_type))
        assert len(record) == 1
        assert record[0].message.args[0] == \
            'Unnecessary conv bias before batch/instance norm'

    # GN does not trigger the warning: only our sentinel warning is seen.
    with pytest.warns(UserWarning) as record:
        norm_cfg = dict(type='GN', num_groups=1)
        ConvModule(3, 8, 2, bias=True, norm_cfg=norm_cfg)
        warnings.warn('No warnings')
    assert len(record) == 1
    assert record[0].message.args[0] == 'No warnings'
def conv_forward(self, x):
    """Patched Conv2d.forward: append a '_conv' tag to the string input."""
    tagged = x + '_conv'
    return tagged
def bn_forward(self, x):
    """Patched BatchNorm2d.forward: append a '_bn' tag to the string input."""
    tagged = x + '_bn'
    return tagged
def relu_forward(self, x):
    """Patched ReLU.forward: append a '_relu' tag to the string input."""
    tagged = x + '_relu'
    return tagged
@patch('torch.nn.ReLU.forward', relu_forward)
@patch('torch.nn.BatchNorm2d.forward', bn_forward)
@patch('torch.nn.Conv2d.forward', conv_forward)
def test_order():
    """With string-tagging stubs patched in, the output records layer order."""
    # order must be a 3-tuple that is a permutation of conv/norm/act.
    for bad_order in (['conv', 'norm', 'act'], ('conv', 'norm'),
                      ('conv', 'norm', 'norm'), ('conv', 'norm', 'something')):
        with pytest.raises(AssertionError):
            ConvModule(3, 8, 2, order=bad_order)

    # Default order: conv -> norm -> act.
    module = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert module('input') == 'input_conv_bn_relu'

    # A custom order is applied verbatim.
    module = ConvModule(
        3, 8, 2, norm_cfg=dict(type='BN'), order=('norm', 'conv', 'act'))
    assert module('input') == 'input_bn_conv_relu'

    # Activation and norm can each be skipped per forward call.
    module = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert module('input', activate=False) == 'input_conv_bn'
    module = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert module('input', norm=False) == 'input_conv_relu'
def test_depthwise_separable_conv():
    """DepthwiseSeparableConvModule: dw/pw split and per-branch cfgs."""
    # in_channels must be divisible by groups... the depthwise conv uses
    # groups == in_channels, so an explicit groups kwarg is rejected.
    with pytest.raises(AssertionError):
        DepthwiseSeparableConvModule(4, 8, 2, groups=2)
    # Default: depthwise groups == in_channels, pointwise is 1x1, ReLU acts.
    conv = DepthwiseSeparableConvModule(3, 8, 2)
    assert (conv.depthwise_conv.conv.groups == 3)
    assert (conv.pointwise_conv.conv.kernel_size == (1, 1))
    assert (not conv.depthwise_conv.with_norm)
    assert (not conv.pointwise_conv.with_norm)
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # dw_norm_cfg applies only to the depthwise branch.
    conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (not conv.pointwise_conv.with_norm)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # pw_norm_cfg applies only to the pointwise branch.
    conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN'))
    assert (not conv.depthwise_conv.with_norm)
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # norm_cfg applies to both branches.
    conv = DepthwiseSeparableConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Custom layer order is forwarded to both ConvModules.
    conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Spectral norm wraps both branch weights.
    conv = DepthwiseSeparableConvModule(
        3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.depthwise_conv.conv, 'weight_orig')
    assert hasattr(conv.pointwise_conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Explicit padding layer for non-default padding modes.
    conv = DepthwiseSeparableConvModule(
        3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # dw_act_cfg / pw_act_cfg / act_cfg control activations per branch.
    conv = DepthwiseSeparableConvModule(
        3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = DepthwiseSeparableConvModule(
        3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = DepthwiseSeparableConvModule(
        3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
class ExampleModel(nn.Module):
    """Toy model whose forward takes an input *shape* instead of a tensor.

    Used to exercise the ``input_constructor`` path of the FLOPs counter:
    the forward pass materializes its own random batch of the given shape.
    """

    def __init__(self):
        super().__init__()
        self.conv2d = nn.Conv2d(3, 8, 3)

    def forward(self, imgs):
        # `imgs` is a (C, H, W) shape tuple, not a tensor.
        batch = torch.randn((1, *imgs))
        return self.conv2d(batch)
def input_constructor(x):
    """Wrap a resolution tuple into the kwargs expected by ExampleModel."""
    return {'imgs': x}
def test_flops_counter():
    """Smoke-test get_model_complexity_info across several model types."""
    # input_res must be a non-empty tuple -> AssertionError otherwise.
    with pytest.raises(AssertionError):
        model = nn.Conv2d(3, 8, 3)
        input_res = [1, 3, 16, 16]
        get_model_complexity_info(model, input_res)
    with pytest.raises(AssertionError):
        model = nn.Conv2d(3, 8, 3)
        input_res = tuple()
        get_model_complexity_info(model, input_res)
    # `gt_results` is a module-level table defined elsewhere in this file;
    # presumably a list of dicts with keys model/input/flops/params.
    for item in gt_results:
        model = item['model']
        input = item['input']
        (flops, params) = get_model_complexity_info(
            model, input, as_strings=False, print_per_layer_stat=False)
        assert ((flops == item['flops']) and (params == item['params']))
    # Custom input constructor: the model's forward receives a shape tuple.
    model = ExampleModel()
    x = (3, 16, 16)
    (flops, params) = get_model_complexity_info(
        model, x, as_strings=False, print_per_layer_stat=False,
        input_constructor=input_constructor)
    assert ((flops == 43904.0) and (params == 224.0))
    # String-formatted output (as_strings defaults to True).
    model = nn.Conv3d(3, 8, 3)
    x = (3, 3, 512, 512)
    (flops, params) = get_model_complexity_info(
        model, x, print_per_layer_stat=False)
    assert ((flops == '0.17 GFLOPs') and (params == str(656)))
    # Per-layer statistics are written to the given output stream.
    model = nn.Conv1d(3, 8, 3)
    x = (3, 16)
    out = StringIO()
    get_model_complexity_info(model, x, ost=out)
    assert (out.getvalue() == 'Conv1d(0.0 M, 100.000% Params, 0.0 GFLOPs, 100.000% FLOPs, 3, 8, kernel_size=(3,), stride=(1,))\n')
    # Flatten / Linear layers are counted as well.
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(), nn.Linear(1568, 2))
    x = (3, 16, 16)
    (flops, params) = get_model_complexity_info(
        model, x, as_strings=False, print_per_layer_stat=True)
    assert ((flops == 47040.0) and (params == 3362))
def test_flops_to_string():
    """flops_to_string formats with explicit or auto-selected units."""
    value = 6.54321 * 10.0 ** 9
    # Explicit unit strings and precision control.
    assert flops_to_string(value) == '6.54 GFLOPs'
    assert flops_to_string(value, 'MFLOPs') == '6543.21 MFLOPs'
    assert flops_to_string(value, 'KFLOPs') == '6543210.0 KFLOPs'
    assert flops_to_string(value, 'FLOPs') == '6543210000.0 FLOPs'
    assert flops_to_string(value, precision=4) == '6.5432 GFLOPs'

    # units=None picks the largest sensible unit automatically.
    auto_cases = (
        (6.54321 * 10.0 ** 9, '6.54 GFLOPs'),
        (3.21 * 10.0 ** 7, '32.1 MFLOPs'),
        (5.4 * 10.0 ** 3, '5.4 KFLOPs'),
        (987, '987 FLOPs'),
    )
    for flops, expected in auto_cases:
        assert flops_to_string(flops, None) == expected
def test_params_to_string():
    """params_to_string formats counts with auto or explicit units."""
    # Automatic unit selection.
    auto_cases = (
        (3.21 * 10.0 ** 7, '32.1 M'),
        (4.56 * 10.0 ** 5, '456.0 k'),
        (7.89 * 10.0 ** 2, '789.0'),
    )
    for count, expected in auto_cases:
        assert params_to_string(count) == expected

    # Explicit units and precision.
    count = 6.54321 * 10.0 ** 7
    assert params_to_string(count, 'M') == '65.43 M'
    assert params_to_string(count, 'K') == '65432.1 K'
    assert params_to_string(count, '') == '65432100.0'
    assert params_to_string(count, precision=4) == '65.4321 M'
def test_fuse_conv_bn():
    """Fusing conv+BN pairs must not change the module's output."""
    x = torch.rand((1, 3, 5, 5))
    layers = [
        nn.BatchNorm2d(3),
        ConvModule(3, 5, 3, norm_cfg=dict(type='BN')),
        ConvModule(5, 5, 3, norm_cfg=dict(type='BN')),
    ]
    model = nn.Sequential(*layers)
    fused_model = fuse_conv_bn(model)
    assert torch.equal(model(x), fused_model(x))
def test_generalized_attention():
    """Test GeneralizedAttention attention types, downsampling and fp16.

    NOTE: renamed from ``test_context_block`` — this file already defines a
    ``test_context_block`` for ContextBlock, so the duplicate name shadowed
    the earlier test and pytest silently skipped it.
    """
    # attention_type '1000': content-content term only.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='1000')
    assert (gen_attention_block.query_conv.in_channels == 16)
    assert (gen_attention_block.key_conv.in_channels == 16)
    # NOTE(review): this assert duplicates the previous line — possibly
    # another projection (e.g. v_conv) was intended; confirm.
    assert (gen_attention_block.key_conv.in_channels == 16)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # attention_type '0100': content-position term (relative position FCs).
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0100')
    assert (gen_attention_block.query_conv.in_channels == 16)
    assert (gen_attention_block.appr_geom_fc_x.in_features == 8)
    assert (gen_attention_block.appr_geom_fc_y.in_features == 8)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # attention_type '0010': appearance bias term.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0010')
    assert (gen_attention_block.key_conv.in_channels == 16)
    assert hasattr(gen_attention_block, 'appr_bias')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # attention_type '0001': geometry bias term.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0001')
    assert (gen_attention_block.appr_geom_fc_x.in_features == 8)
    assert (gen_attention_block.appr_geom_fc_y.in_features == 8)
    assert hasattr(gen_attention_block, 'geom_bias')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # A positive spatial_range builds a local constraint map.
    imgs = torch.randn(2, 256, 20, 20)
    gen_attention_block = GeneralizedAttention(256, spatial_range=10)
    assert hasattr(gen_attention_block, 'local_constraint_map')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # Query / key-value downsampling.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, q_stride=2)
    assert (gen_attention_block.q_downsample is not None)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, kv_stride=2)
    assert (gen_attention_block.kv_downsample is not None)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # Half-precision forward on GPU with all attention terms enabled.
    if torch.cuda.is_available():
        imgs = torch.randn(2, 16, 20, 20).cuda().to(torch.half)
        gen_attention_block = GeneralizedAttention(
            16,
            spatial_range=(-1),
            num_heads=8,
            attention_type='1111',
            kv_stride=2)
        gen_attention_block.cuda().type(torch.half)
        out = gen_attention_block(imgs)
        assert (out.shape == imgs.shape)
def test_hsigmoid():
    """HSigmoid computes clamp((x + bias) / divisor, 0, 1)."""
    # A zero divisor is rejected.
    with pytest.raises(AssertionError):
        HSigmoid(divisor=0)

    shape = torch.Size([1, 3, 64, 64])
    # (layer, bias, divisor): default (3, 6) and a custom (1, 2) variant.
    cases = ((HSigmoid(), 3, 6), (HSigmoid(1, 2, 0, 1), 1, 2))
    for act, bias, divisor in cases:
        x = torch.randn(shape)
        out = act(x)
        expected = torch.min(
            torch.max((x + bias) / divisor, torch.zeros(shape)),
            torch.ones(shape))
        assert out.shape == expected.shape
        assert torch.equal(out, expected)
def test_hswish():
    """HSwish computes x * relu6(x + 3) / 6 and honours `inplace`."""
    assert HSwish(inplace=True).act.inplace
    act = HSwish()
    assert not act.act.inplace

    x = torch.randn(1, 3, 64, 64)
    expected = x * relu6(x + 3) / 6
    out = act(x)
    assert out.shape == expected.shape
    assert torch.equal(out, expected)
def test_build_model_from_cfg():
    """build_model_from_cfg: single cfg, list of cfgs, child registries."""
    BACKBONES = mmcv.Registry('backbone', build_func=build_model_from_cfg)

    @BACKBONES.register_module()
    class ResNet(nn.Module):

        def __init__(self, depth, stages=4):
            super().__init__()
            self.depth = depth
            self.stages = stages

        def forward(self, x):
            return x

    @BACKBONES.register_module()
    class ResNeXt(nn.Module):

        def __init__(self, depth, stages=4):
            super().__init__()
            self.depth = depth
            self.stages = stages

        def forward(self, x):
            return x

    # A single cfg dict builds one module; defaults are preserved.
    cfg = dict(type='ResNet', depth=50)
    model = BACKBONES.build(cfg)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    cfg = dict(type='ResNeXt', depth=50, stages=3)
    model = BACKBONES.build(cfg)
    assert isinstance(model, ResNeXt)
    assert ((model.depth == 50) and (model.stages == 3))
    # A list of cfgs builds an nn.Sequential in order.
    cfg = [dict(type='ResNet', depth=50),
           dict(type='ResNeXt', depth=50, stages=3)]
    model = BACKBONES.build(cfg)
    assert isinstance(model, nn.Sequential)
    assert isinstance(model[0], ResNet)
    assert ((model[0].depth == 50) and (model[0].stages == 4))
    assert isinstance(model[1], ResNeXt)
    assert ((model[1].depth == 50) and (model[1].stages == 3))
    # A child registry inherits the parent's build_func unless overridden.
    NEW_MODELS = mmcv.Registry('models', parent=MODELS, scope='new')
    assert (NEW_MODELS.build_func is build_model_from_cfg)

    def pseudo_build(cfg):
        return cfg

    NEW_MODELS = mmcv.Registry('models', parent=MODELS, build_func=pseudo_build)
    assert (NEW_MODELS.build_func is pseudo_build)
def test_nonlocal():
    """_NonLocalNd rejects unknown modes and builds under common configs."""
    # An unsupported mode raises ValueError.
    with pytest.raises(ValueError):
        _NonLocalNd(3, mode='unsupport_mode')

    # Construction smoke tests with/without norm and zero-init.
    for kwargs in (dict(),
                   dict(norm_cfg=dict(type='BN')),
                   dict(zeros_init=False),
                   dict(norm_cfg=dict(type='BN'), zeros_init=False)):
        _NonLocalNd(3, **kwargs)
def test_nonlocal3d():
    """NonLocal3d: all modes preserve shape; sub_sample inserts MaxPool3d.

    NOTE(review): under parrots, `imgs` is moved to CUDA once in the first
    case and stays there for all later cases — keep statement order intact.
    """
    # NonLocal3d with 'embedded_gaussian' mode (default).
    imgs = torch.randn(2, 3, 10, 20, 20)
    nonlocal_3d = NonLocal3d(3)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # 'dot_product' mode.
    nonlocal_3d = NonLocal3d(3, mode='dot_product')
    assert (nonlocal_3d.mode == 'dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # 'concatenation' mode.
    nonlocal_3d = NonLocal3d(3, mode='concatenation')
    assert (nonlocal_3d.mode == 'concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # 'gaussian' mode has no phi projection.
    nonlocal_3d = NonLocal3d(3, mode='gaussian')
    assert (not hasattr(nonlocal_3d, 'phi'))
    assert (nonlocal_3d.mode == 'gaussian')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # sub_sample=True appends a (1, 2, 2) MaxPool3d to g (and phi).
    nonlocal_3d = NonLocal3d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_3d.g, nn.Sequential) and (len(nonlocal_3d.g) == 2))
    assert isinstance(nonlocal_3d.g[1], nn.MaxPool3d)
    assert (nonlocal_3d.g[1].kernel_size == (1, 2, 2))
    assert isinstance(nonlocal_3d.phi, nn.MaxPool3d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # Non-gaussian modes pool both g and phi.
    nonlocal_3d = NonLocal3d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_3d.g, nonlocal_3d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool3d)
        assert (m[1].kernel_size == (1, 2, 2))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
def test_nonlocal2d():
    """NonLocal2d: all modes preserve shape; sub_sample inserts MaxPool2d.

    NOTE(review): in the sub_sample cases `imgs` keeps whatever device the
    previous case left it on (parrots moves it to CUDA) — keep order intact.
    """
    # Default 'embedded_gaussian' mode.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # 'dot_product' mode.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # 'concatenation' mode.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # 'gaussian' mode has no phi projection.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='gaussian')
    assert (not hasattr(nonlocal_2d, 'phi'))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # sub_sample=True appends a 2x2 MaxPool2d to g (and phi).
    nonlocal_2d = NonLocal2d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_2d.g, nn.Sequential) and (len(nonlocal_2d.g) == 2))
    assert isinstance(nonlocal_2d.g[1], nn.MaxPool2d)
    assert (nonlocal_2d.g[1].kernel_size == (2, 2))
    assert isinstance(nonlocal_2d.phi, nn.MaxPool2d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # Non-gaussian modes pool both g and phi.
    nonlocal_2d = NonLocal2d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_2d.g, nonlocal_2d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool2d)
        assert (m[1].kernel_size == (2, 2))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
def test_nonlocal1d():
    """NonLocal1d: all modes preserve shape; sub_sample inserts MaxPool1d.

    NOTE(review): in the sub_sample cases `imgs` keeps whatever device the
    previous case left it on (parrots moves it to CUDA) — keep order intact.
    """
    # Default 'embedded_gaussian' mode.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # 'dot_product' mode.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # 'concatenation' mode.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # 'gaussian' mode has no phi projection.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='gaussian')
    assert (not hasattr(nonlocal_1d, 'phi'))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # sub_sample=True appends a kernel-2 MaxPool1d to g (and phi).
    nonlocal_1d = NonLocal1d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_1d.g, nn.Sequential) and (len(nonlocal_1d.g) == 2))
    assert isinstance(nonlocal_1d.g[1], nn.MaxPool1d)
    assert (nonlocal_1d.g[1].kernel_size == 2)
    assert isinstance(nonlocal_1d.phi, nn.MaxPool1d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # Non-gaussian modes pool both g and phi.
    nonlocal_1d = NonLocal1d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_1d.g, nonlocal_1d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool1d)
        assert (m[1].kernel_size == 2)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
def test_revert_syncbn():
    """revert_sync_batchnorm converts SyncBN so non-distributed CPU runs work."""
    module = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
    x = torch.randn(1, 3, 10, 10)
    # SyncBN cannot run without an initialized process group.
    with pytest.raises(ValueError):
        y = module(x)
    module = revert_sync_batchnorm(module)
    y = module(x)
    assert y.shape == (1, 8, 9, 9)
def test_revert_mmsyncbn():
    """Compare MMSyncBN eval output against its reverted BN on a cluster.

    No-op unless launched under SLURM with at least two processes, e.g.
    ``srun -p test --gres=gpu:2 -n2``.
    """
    if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) < 2)):
        print('Must run on slurm with more than 1 process!\nsrun -p test --gres=gpu:2 -n2')
        return
    rank = int(os.environ['SLURM_PROCID'])
    world_size = int(os.environ['SLURM_NTASKS'])
    local_rank = int(os.environ['SLURM_LOCALID'])
    node_list = str(os.environ['SLURM_NODELIST'])
    # Derive the master address from the digits of the SLURM node list.
    node_parts = re.findall('[0-9]+', node_list)
    os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' +
                                 f'.{node_parts[3]}.{node_parts[4]}')
    os.environ['MASTER_PORT'] = '12341'
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)
    dist.init_process_group('nccl')
    torch.cuda.set_device(local_rank)
    # Every rank evaluates the same broadcast input.
    x = torch.randn(1, 3, 10, 10).cuda()
    dist.broadcast(x, src=0)
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='MMSyncBN')).cuda()
    conv.eval()
    y_mmsyncbn = conv(x).detach().cpu().numpy()
    # After reverting, plain BN must reproduce the MMSyncBN eval output,
    # both on GPU and on CPU.
    conv = revert_sync_batchnorm(conv)
    y_bn = conv(x).detach().cpu().numpy()
    assert np.all(np.isclose(y_bn, y_mmsyncbn, 0.001))
    (conv, x) = (conv.to('cpu'), x.to('cpu'))
    y_bn_cpu = conv(x).detach().numpy()
    assert np.all(np.isclose(y_bn, y_bn_cpu, 0.001))
def test_scale():
    """Scale multiplies its input by a learnable float scalar (default 1.0)."""
    for ctor_args, init_value in (((), 1.0), ((10.0, ), 10.0)):
        scale = Scale(*ctor_args)
        assert scale.scale.data == init_value
        assert scale.scale.dtype == torch.float
        x = torch.rand(1, 3, 64, 64)
        assert scale(x).shape == (1, 3, 64, 64)
def test_swish():
    """Swish computes x * sigmoid(x)."""
    act = Swish()
    x = torch.randn(1, 3, 64, 64)
    expected = x * F.sigmoid(x)
    out = act(x)
    assert out.shape == expected.shape
    assert torch.equal(out, expected)
def test_adaptive_padding():
    """AdaptivePadding pads so the kernel covers every input position."""
    for padding in ('same', 'corner'):
        # kernel 16 / stride 16: sizes are padded up to multiples of 16.
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=padding)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        input = torch.rand(1, 1, 16, 17)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        # kernel 2 / stride 2: odd sizes are padded by one.
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (12, 14))
        # Stride larger than the kernel: no padding needed here.
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (10, 13))
        # Kernel larger than stride pads generously.
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (21, 21))
        # A dilated (4, 5) kernel pads like an undilated (7, 9) kernel.
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=padding)
        dilation_out = adap_pad(input)
        assert ((dilation_out.shape[2], dilation_out.shape[3]) == (16, 21))
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=padding)
        kernel79_out = adap_pad(input)
        assert ((kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21))
        assert (kernel79_out.shape == dilation_out.shape)
    # Only the string modes 'same' and 'corner' are supported.
    with pytest.raises(AssertionError):
        AdaptivePadding(
            kernel_size=kernel_size, stride=stride, dilation=dilation,
            padding=1)
def test_patch_embed():
    """PatchEmbed projects an image to a (B, num_patches, embed_dims) sequence."""
    # Basic case: 3x4 image, 3x3 kernel, stride 1, no padding/norm.
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_1 = PatchEmbed(
        in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size,
        stride=stride, padding=0, dilation=1, norm_cfg=None)
    (x1, shape) = patch_merge_1(dummy_input)
    assert (x1.shape == (2, 2, 10))
    assert (shape == (1, 2))
    assert ((shape[0] * shape[1]) == x1.shape[1])
    # Dilated kernel reduces the effective output size.
    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_2 = PatchEmbed(
        in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size,
        stride=stride, padding=0, dilation=2, norm_cfg=None)
    (x2, shape) = patch_merge_2(dummy_input)
    assert (x2.shape == (2, 1, 10))
    assert (shape == (1, 1))
    assert ((shape[0] * shape[1]) == x2.shape[1])
    # With input_size given, init_out_size is precomputed from the conv
    # arithmetic: (H - dilation*(k-1) - 1) // stride + 1.
    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(
        in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size,
        stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'),
        input_size=input_size)
    (x3, shape) = patch_merge_3(dummy_input)
    assert (x3.shape == (2, 1, 10))
    assert (shape == (1, 1))
    assert ((shape[0] * shape[1]) == x3.shape[1])
    assert (patch_merge_3.init_out_size[1] ==
            ((((input_size[0] - (2 * 4)) - 1) // 2) + 1))
    assert (patch_merge_3.init_out_size[0] ==
            ((((input_size[0] - (2 * 4)) - 1) // 2) + 1))
    # The runtime output size must agree with init_out_size.
    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(
        in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size,
        stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'),
        input_size=input_size)
    (_, shape) = patch_merge_3(dummy_input)
    assert (shape == patch_merge_3.init_out_size)
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(
        in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size,
        stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'),
        input_size=input_size)
    (_, shape) = patch_merge_3(dummy_input)
    assert (shape == patch_merge_3.init_out_size)
    # Adaptive padding modes keep every pixel covered by the kernel.
    for padding in ('same', 'corner'):
        # stride 1: output keeps the spatial resolution.
        in_c = 2
        embed_dims = 3
        B = 2
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 25, 3))
        assert (out_size == (5, 5))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Kernel == stride == input: a single patch.
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 1, 3))
        assert (out_size == (1, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Uneven input gets padded to fit an extra patch row.
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 2, 3))
        assert (out_size == (2, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Rectangular kernel/stride.
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 3, 3))
        assert (out_size == (1, 3))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
def test_patch_merging():
    """PatchMerging downsamples a (B, L, C) sequence via unfold + linear."""
    # Numeric padding with a 3x3 kernel, stride 3.
    in_c = 3
    out_c = 4
    kernel_size = 3
    stride = 3
    padding = 1
    dilation = 1
    bias = False
    patch_merge = PatchMerging(
        in_channels=in_c, out_channels=out_c, kernel_size=kernel_size,
        stride=stride, padding=padding, dilation=dilation, bias=bias)
    (B, L, C) = (1, 100, 3)
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    (x_out, out_size) = patch_merge(x, input_size)
    assert (x_out.size() == (1, 16, 4))
    assert (out_size == (4, 4))
    assert (x_out.size(1) == (out_size[0] * out_size[1]))
    # Dilated kernel with numeric padding.
    in_c = 4
    out_c = 5
    kernel_size = 6
    stride = 3
    padding = 2
    dilation = 2
    bias = False
    patch_merge = PatchMerging(
        in_channels=in_c, out_channels=out_c, kernel_size=kernel_size,
        stride=stride, padding=padding, dilation=dilation, bias=bias)
    (B, L, C) = (1, 100, 4)
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    (x_out, out_size) = patch_merge(x, input_size)
    assert (x_out.size() == (1, 4, 5))
    assert (out_size == (2, 2))
    assert (x_out.size(1) == (out_size[0] * out_size[1]))
    # Adaptive padding modes mirror the PatchEmbed cases above.
    for padding in ('same', 'corner'):
        # stride 1 keeps the spatial resolution.
        in_c = 2
        out_c = 3
        B = 2
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c, out_channels=out_c, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 25, 3))
        assert (out_size == (5, 5))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Kernel == stride == input: a single output token.
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c, out_channels=out_c, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 1, 3))
        assert (out_size == (1, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Uneven input gets padded to fit an extra token row.
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c, out_channels=out_c, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 2, 3))
        assert (out_size == (2, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Rectangular kernel/stride.
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c, out_channels=out_c, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 3, 3))
        assert (out_size == (1, 3))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
def test_multiheadattention():
    """MultiheadAttention: batch-first and query-first layouts agree, and the
    deprecated `residual` kwarg behaves like `identity`.

    Fix: removed a leftover no-op trailing expression that built a 1-tuple
    from an extra forward pass without asserting anything.
    """
    # Smoke-test construction with a plain Dropout layer.
    MultiheadAttention(
        embed_dims=5,
        num_heads=5,
        attn_drop=0,
        proj_drop=0,
        dropout_layer=dict(type='Dropout', drop_prob=0.0),
        batch_first=True)
    batch_dim = 2
    embed_dim = 5
    num_query = 100
    attn_batch_first = MultiheadAttention(
        embed_dims=5,
        num_heads=5,
        attn_drop=0,
        proj_drop=0,
        dropout_layer=dict(type='DropPath', drop_prob=0.0),
        batch_first=True)
    attn_query_first = MultiheadAttention(
        embed_dims=5,
        num_heads=5,
        attn_drop=0,
        proj_drop=0,
        dropout_layer=dict(type='DropPath', drop_prob=0.0),
        batch_first=False)
    # Share the weights so the two layouts are directly comparable.
    param_dict = dict(attn_query_first.named_parameters())
    for (n, v) in attn_batch_first.named_parameters():
        param_dict[n].data = v.data
    # Self-attention: (B, N, C) and (N, B, C) inputs must agree.
    input_batch_first = torch.rand(batch_dim, num_query, embed_dim)
    input_query_first = input_batch_first.transpose(0, 1)
    assert torch.allclose(
        attn_query_first(input_query_first).sum(),
        attn_batch_first(input_batch_first).sum())
    # Cross-attention with an explicit key.
    key_batch_first = torch.rand(batch_dim, num_query, embed_dim)
    key_query_first = key_batch_first.transpose(0, 1)
    assert torch.allclose(
        attn_query_first(input_query_first, key_query_first).sum(),
        attn_batch_first(input_batch_first, key_batch_first).sum())
    # `residual` (deprecated alias) and `identity` both replace the query
    # as the shortcut term added to the attention output.
    identity = torch.ones_like(input_query_first)
    assert torch.allclose(
        attn_query_first(
            input_query_first, key_query_first, residual=identity).sum(),
        ((attn_batch_first(input_batch_first, key_batch_first).sum() +
          identity.sum()) - input_batch_first.sum()))
    assert torch.allclose(
        attn_query_first(
            input_query_first, key_query_first, identity=identity).sum(),
        ((attn_batch_first(input_batch_first, key_batch_first).sum() +
          identity.sum()) - input_batch_first.sum()))
def test_ffn():
    """FFN validates num_fcs, accepts the legacy `add_residual`/`residual`
    kwargs, and adds the identity shortcut position-wise.

    Fix: the last two `torch.allclose(...)` checks were computed but never
    asserted, so the test could not fail on a regression — `assert` added.
    """
    with pytest.raises(AssertionError):
        FFN(num_fcs=1)  # at least two fc layers are required
    FFN(dropout=0, add_residual=True)  # deprecated alias still accepted
    ffn = FFN(dropout=0, add_identity=True)

    # FFN is position-wise, so transposing batch/sequence dims must not
    # change the summed output.
    input_tensor = torch.rand(2, 20, 256)
    input_tensor_nbc = input_tensor.transpose(0, 1)
    assert torch.allclose(ffn(input_tensor).sum(), ffn(input_tensor_nbc).sum())

    # An explicit identity replaces the input as the shortcut term.
    residual = torch.rand_like(input_tensor)
    assert torch.allclose(
        ffn(input_tensor, residual=residual).sum(),
        ((ffn(input_tensor).sum() + residual.sum()) - input_tensor.sum()))
    assert torch.allclose(
        ffn(input_tensor, identity=residual).sum(),
        ((ffn(input_tensor).sum() + residual.sum()) - input_tensor.sum()))
@pytest.mark.skipif((not torch.cuda.is_available()), reason='Cuda not available')
def test_basetransformerlayer_cuda():
    """A stack of BaseTransformerLayers runs on GPU and preserves shape."""
    layer = BaseTransformerLayer(
        operation_order=('self_attn', 'ffn'),
        batch_first=True,
        attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8))
    stack = ModuleList([copy.deepcopy(layer) for _ in range(2)])
    stack.to('cuda')
    x = torch.rand(2, 10, 256).cuda()
    for block in stack:
        x = block(x)
    assert x.shape == torch.Size([2, 10, 256])