code
stringlengths
17
6.64M
def check_time(timer_id):
    """Add check points in a single line.

    This method is suitable for running a task on a list of items. A timer
    will be registered when the method is called for the first time.

    Examples:
        >>> import time
        >>> import mmcv
        >>> for i in range(1, 6):
        >>>     # simulate a code block
        >>>     time.sleep(i)
        >>>     mmcv.check_time('task1')
        2.000
        3.000
        4.000
        5.000

    Args:
        str: Timer identifier.
    """
    # Subsequent calls report the time elapsed since the previous check.
    if timer_id in _g_timers:
        return _g_timers[timer_id].since_last_check()
    # First call for this id: register a fresh timer and report 0.
    _g_timers[timer_id] = Timer()
    return 0
def is_jit_tracing() -> bool:
    """Return whether the code is currently being traced by ``torch.jit``.

    Falls back to ``False`` (with a warning) on torch < 1.6.0 and parrots,
    where ``torch.jit.is_tracing`` is not reliable.
    """
    supported = (torch.__version__ != 'parrots'
                 and digit_version(torch.__version__) >= digit_version('1.6.0'))
    if not supported:
        warnings.warn(
            'torch.jit.is_tracing is only supported after v1.6.0. '
            'Therefore is_tracing returns False automatically. Please '
            'set on_trace manually if you are using trace.', UserWarning)
        return False
    on_trace = torch.jit.is_tracing()
    if isinstance(on_trace, bool):
        return on_trace
    # On some versions is_tracing is a function object; query the C API.
    return torch._C._is_tracing()
def digit_version(version_str: str, length: int = 4):
    """Convert a version string into a tuple of integers.

    This method is usually used for comparing two versions. For pre-release
    versions: alpha < beta < rc.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int]: The version info in digits (integers).
    """
    assert 'parrots' not in version_str
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    digits = list(version.release)[:length]
    # Pad short versions with zeros, e.g. "1.2" -> [1, 2, 0, 0].
    digits += [0] * (length - len(digits))
    if version.is_prerelease:
        # Pre-release ordering: alpha(-3) < beta(-2) < rc(-1) < final;
        # -4 marks an unrecognized pre-release tag.
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        if version.pre:
            if version.pre[0] in mapping:
                val = mapping[version.pre[0]]
            else:
                warnings.warn(f'unknown prerelease version {version.pre[0]}, version checking may go wrong')
            digits.extend([val, version.pre[-1]])
        else:
            digits.extend([val, 0])
    elif version.is_postrelease:
        # Post-releases sort above the corresponding final release.
        digits.extend([1, version.post])
    else:
        digits.extend([0, 0])
    return tuple(digits)
def _minimal_ext_cmd(cmd):
    """Run ``cmd`` in a minimal, locale-neutral environment.

    Args:
        cmd (list[str]): Command and arguments to execute.

    Returns:
        bytes: The raw stdout of the command.
    """
    # Keep only the variables needed to locate executables.
    env = {key: os.environ[key]
           for key in ('SYSTEMROOT', 'PATH', 'HOME') if key in os.environ}
    # Force the C locale so the command output is not translated.
    env.update(LANGUAGE='C', LANG='C', LC_ALL='C')
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
    return proc.communicate()[0]
def get_git_hash(fallback='unknown', digits=None):
    """Get the git hash of the current repo.

    Args:
        fallback (str, optional): The fallback string when git hash is
            unavailable. Defaults to 'unknown'.
        digits (int, optional): kept digits of the hash. Defaults to None,
            meaning all digits are kept.

    Returns:
        str: Git commit hash.
    """
    if digits is not None and not isinstance(digits, int):
        raise TypeError('digits must be None or an integer')
    try:
        raw = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
    except OSError:
        # git executable missing or not runnable
        return fallback
    sha = raw.strip().decode('ascii')
    return sha if digits is None else sha[:digits]
def parse_version_info(version_str: str, length: int = 4) -> tuple:
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
            (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
            (2, 0, 0, 0, 'rc', 1) (when length is set to 4).

    Raises:
        AssertionError: If ``version_str`` cannot be parsed.
    """
    from packaging.version import parse
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)[:length]
    # Pad short versions with zeros, e.g. "1.3" -> [1, 3, 0, 0].
    if len(release) < length:
        release += [0] * (length - len(release))
    if version.is_prerelease:
        # e.g. "2.0.0rc1" -> pre == ('rc', 1)
        release.extend(list(version.pre))
    elif version.is_postrelease:
        # BUGFIX: ``version.post`` is a plain int, so the original
        # ``list(version.post)`` raised TypeError for any post-release
        # such as "1.0.0.post1". Emit ('post', N) to keep the tuple
        # length consistent with the pre-release branch.
        release.extend(['post', version.post])
    else:
        release.extend([0, 0])
    return tuple(release)
class Cache:
    """A fixed-capacity FIFO cache.

    When the cache is full, the oldest inserted entry is evicted. Inserting
    a key that already exists is a no-op (the existing value is kept).

    Args:
        capacity (int): Maximum number of entries; must be positive.

    Raises:
        ValueError: If ``capacity`` is not positive after integer
            conversion.
    """

    def __init__(self, capacity):
        capacity = int(capacity)
        # BUGFIX: validate the *converted* value. The original checked the
        # raw argument after storing int(capacity), so e.g. Cache(0.5)
        # silently produced a zero-capacity cache that later raised
        # KeyError from popitem() on the first put().
        if capacity <= 0:
            raise ValueError('capacity must be a positive integer')
        self._cache = OrderedDict()
        self._capacity = capacity

    @property
    def capacity(self):
        """int: Maximum number of entries the cache can hold."""
        return self._capacity

    @property
    def size(self):
        """int: Current number of cached entries."""
        return len(self._cache)

    def put(self, key, val):
        """Insert ``val`` under ``key``, evicting the oldest entry if full.

        Existing keys are left untouched.
        """
        if key in self._cache:
            return
        if len(self._cache) >= self.capacity:
            # Evict in insertion (FIFO) order.
            self._cache.popitem(last=False)
        self._cache[key] = val

    def get(self, key, default=None):
        """Return the value cached under ``key``, or ``default`` if absent."""
        return self._cache.get(key, default)
class VideoReader():
    """Video class with similar usage to a list object.

    This video wrapper class provides convenient apis to access frames.
    There exists an issue of OpenCV's VideoCapture class that jumping to a
    certain frame may be inaccurate. It is fixed in this class by checking
    the position after jumping each time.
    Cache is used when decoding videos. So if the same frame is visited for
    the second time, there is no need to decode again if it is stored in the
    cache.

    Examples:
        >>> import mmcv
        >>> v = mmcv.VideoReader('sample.mp4')
        >>> len(v)  # get the total frame number with `len()`
        120
        >>> for img in v:  # v is iterable
        >>>     mmcv.imshow(img)
        >>> v[5]  # get the 6th frame
    """

    def __init__(self, filename, cache_capacity=10):
        # Only check existence for local paths; URLs are handed to OpenCV
        # directly.
        if (not filename.startswith(('https://', 'http://'))):
            check_file_exist(filename, ('Video file not found: ' + filename))
        self._vcap = cv2.VideoCapture(filename)
        assert (cache_capacity > 0)
        self._cache = Cache(cache_capacity)
        # Index of the next frame to decode (0-based).
        self._position = 0
        # Query basic video properties once at open time.
        self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))
        self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))
        self._fps = self._vcap.get(CAP_PROP_FPS)
        self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))
        self._fourcc = self._vcap.get(CAP_PROP_FOURCC)

    @property
    def vcap(self):
        """:obj:`cv2.VideoCapture`: The raw VideoCapture object."""
        return self._vcap

    @property
    def opened(self):
        """bool: Indicate whether the video is opened."""
        return self._vcap.isOpened()

    @property
    def width(self):
        """int: Width of video frames."""
        return self._width

    @property
    def height(self):
        """int: Height of video frames."""
        return self._height

    @property
    def resolution(self):
        """tuple: Video resolution (width, height)."""
        return (self._width, self._height)

    @property
    def fps(self):
        """float: FPS of the video."""
        return self._fps

    @property
    def frame_cnt(self):
        """int: Total frames of the video."""
        return self._frame_cnt

    @property
    def fourcc(self):
        """str: "Four character code" of the video."""
        return self._fourcc

    @property
    def position(self):
        """int: Current cursor position, indicating frame decoded."""
        return self._position

    def _get_real_position(self):
        # Position reported by the underlying VideoCapture; may drift from
        # self._position after an inaccurate seek.
        return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))

    def _set_real_position(self, frame_id):
        # Seek, then step frame-by-frame to compensate for OpenCV's
        # inaccurate jumping (see class docstring).
        self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)
        pos = self._get_real_position()
        for _ in range((frame_id - pos)):
            self._vcap.read()
        self._position = frame_id

    def read(self):
        """Read the next frame.

        If the next frame have been decoded before and in the cache, then
        return it directly, otherwise decode, cache and return it.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        if self._cache:
            img = self._cache.get(self._position)
            if (img is not None):
                # Cache hit: no decoding needed.
                ret = True
            else:
                # Re-align the real decoder position before decoding.
                if (self._position != self._get_real_position()):
                    self._set_real_position(self._position)
                (ret, img) = self._vcap.read()
                if ret:
                    self._cache.put(self._position, img)
        else:
            (ret, img) = self._vcap.read()
        if ret:
            self._position += 1
        return img

    def get_frame(self, frame_id):
        """Get frame by index.

        Args:
            frame_id (int): Index of the expected frame, 0-based.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        if ((frame_id < 0) or (frame_id >= self._frame_cnt)):
            raise IndexError(f'"frame_id" must be between 0 and {(self._frame_cnt - 1)}')
        if (frame_id == self._position):
            return self.read()
        if self._cache:
            img = self._cache.get(frame_id)
            if (img is not None):
                # Cache hit: advance the logical cursor past this frame.
                self._position = (frame_id + 1)
                return img
        self._set_real_position(frame_id)
        (ret, img) = self._vcap.read()
        if ret:
            if self._cache:
                self._cache.put(self._position, img)
            self._position += 1
        return img

    def current_frame(self):
        """Get the current frame (frame that is just visited).

        Returns:
            ndarray or None: If the video is fresh, return None, otherwise
            return the frame.
        """
        if (self._position == 0):
            return None
        return self._cache.get((self._position - 1))

    def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True):
        """Convert a video to frame images.

        Args:
            frame_dir (str): Output directory to store all the frame images.
            file_start (int): Filenames will start from the specified number.
            filename_tmpl (str): Filename template with the index as the
                placeholder.
            start (int): The starting frame index.
            max_num (int): Maximum number of frames to be written.
            show_progress (bool): Whether to show a progress bar.

        Raises:
            ValueError: If ``start`` is not less than the total frame count.
        """
        mkdir_or_exist(frame_dir)
        if (max_num == 0):
            task_num = (self.frame_cnt - start)
        else:
            task_num = min((self.frame_cnt - start), max_num)
        if (task_num <= 0):
            raise ValueError('start must be less than total frame number')
        if (start > 0):
            self._set_real_position(start)

        def write_frame(file_idx):
            # Decode the next frame and write it; stop silently at EOF.
            img = self.read()
            if (img is None):
                return
            filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
            cv2.imwrite(filename, img)

        if show_progress:
            track_progress(write_frame, range(file_start, (file_start + task_num)))
        else:
            for i in range(task_num):
                write_frame((file_start + i))

    def __len__(self):
        return self.frame_cnt

    def __getitem__(self, index):
        if isinstance(index, slice):
            # Slicing returns a list of frames.
            return [self.get_frame(i) for i in range(*index.indices(self.frame_cnt))]
        # Support negative indices like a list.
        if (index < 0):
            index += self.frame_cnt
            if (index < 0):
                raise IndexError('index out of range')
        return self.get_frame(index)

    def __iter__(self):
        self._set_real_position(0)
        return self

    def __next__(self):
        img = self.read()
        if (img is not None):
            return img
        else:
            raise StopIteration

    # Python 2 compatible alias.
    next = __next__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Release the underlying capture handle on context exit.
        self._vcap.release()
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True):
    """Read the frame images from a directory and join them as a video.

    Args:
        frame_dir (str): The directory containing video frames.
        video_file (str): Output filename.
        fps (float): FPS of the output video.
        fourcc (str): Fourcc of the output video, this should be compatible
            with the output file type.
        filename_tmpl (str): Filename template with the index as the variable.
        start (int): Starting frame index.
        end (int): Ending frame index.
        show_progress (bool): Whether to show a progress bar.
    """
    if end == 0:
        # Default to all frames carrying the template's extension.
        suffix = filename_tmpl.split('.')[-1]
        end = sum(1 for _ in scandir(frame_dir, suffix))
    first_file = osp.join(frame_dir, filename_tmpl.format(start))
    check_file_exist(first_file, 'The start frame not found: ' + first_file)
    # The first frame determines the output resolution.
    first_img = cv2.imread(first_file)
    height, width = first_img.shape[:2]
    vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps,
                              (width, height))

    def write_frame(file_idx):
        path = osp.join(frame_dir, filename_tmpl.format(file_idx))
        vwriter.write(cv2.imread(path))

    if show_progress:
        track_progress(write_frame, range(start, end))
    else:
        for idx in range(start, end):
            write_frame(idx)
    vwriter.release()
@requires_executable('ffmpeg')
def convert_video(in_file, out_file, print_cmd=False, pre_options='', **kwargs):
    """Convert a video with ffmpeg.

    This provides a general api to ffmpeg, the executed command is::

        `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`

    Options(kwargs) are mapped to ffmpeg commands with the following rules:

    - key=val: "-key val"
    - key=True: "-key"
    - key=False: ""

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        pre_options (str): Options appears before "-i <in_file>".
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    options = []
    for key, value in kwargs.items():
        if isinstance(value, bool):
            # Boolean flags are emitted bare, or dropped when False.
            if value:
                options.append(f'-{key}')
        elif key == 'log_level':
            assert value in ['quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace']
            options.append(f'-loglevel {value}')
        else:
            options.append(f'-{key} {value}')
    cmd = f"ffmpeg -y {pre_options} -i {in_file} {' '.join(options)} {out_file}"
    if print_cmd:
        print(cmd)
    # NOTE(review): shell=True with interpolated filenames is unsafe for
    # untrusted input; callers must only pass trusted paths/options.
    subprocess.call(cmd, shell=True)
@requires_executable('ffmpeg')
def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False):
    """Resize a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).
        ratio (tuple or float): Expected resize ratio, (2, 0.5) means
            (w*2, h*0.5).
        keep_ar (bool): Whether to keep original aspect ratio.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.

    Raises:
        ValueError: If neither or both of ``size`` and ``ratio`` are given.
    """
    if size is None and ratio is None:
        raise ValueError('expected size or ratio must be specified')
    if size is not None and ratio is not None:
        raise ValueError('size and ratio cannot be specified at the same time')
    options = {'log_level': log_level}
    if size:
        if keep_ar:
            options['vf'] = f'scale=w={size[0]}:h={size[1]}:force_original_aspect_ratio=decrease'
        else:
            options['vf'] = f'scale={size[0]}:{size[1]}'
    else:
        # A scalar ratio applies to both dimensions.
        if not isinstance(ratio, tuple):
            ratio = (ratio, ratio)
        options['vf'] = f'scale="trunc(iw*{ratio[0]}):trunc(ih*{ratio[1]})"'
    convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg')
def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False):
    """Cut a clip from a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        start (None or float): Start time (in seconds).
        end (None or float): End time (in seconds).
        vcodec (None or str): Output video codec, None for unchanged.
        acodec (None or str): Output audio codec, None for unchanged.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    options = {'log_level': log_level}
    # Default to stream copy when codecs are left unspecified.
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    if start:
        options['ss'] = start
    else:
        start = 0
    if end:
        # ffmpeg takes a duration (-t), not an end timestamp.
        options['t'] = end - start
    convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg')
def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False):
    """Concatenate multiple videos into a single one.

    Args:
        video_list (list): A list of video filenames
        out_file (str): Output video filename
        vcodec (None or str): Output video codec, None for unchanged
        acodec (None or str): Output audio codec, None for unchanged
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    # Write a temporary list file in ffmpeg "concat" demuxer format.
    tmp_filehandler, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
    with open(tmp_filename, 'w') as f:
        f.writelines(f'file {osp.abspath(filename)}\n'
                     for filename in video_list)
    options = {'log_level': log_level}
    # Default to stream copy when codecs are left unspecified.
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    convert_video(tmp_filename, out_file, print_cmd, pre_options='-f concat -safe 0', **options)
    os.close(tmp_filehandler)
    os.remove(tmp_filename)
class Color(Enum):
    """An enum that defines common colors.

    Contains red, green, blue, cyan, yellow, magenta, white and black.
    """
    # Values are (B, G, R) tuples in OpenCV channel order.
    red = (0, 0, 255)
    green = (0, 255, 0)
    blue = (255, 0, 0)
    cyan = (255, 255, 0)
    yellow = (0, 255, 255)
    magenta = (255, 0, 255)
    white = (255, 255, 255)
    black = (0, 0, 0)
def color_val(color):
    """Convert various input to color tuples.

    Args:
        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs

    Returns:
        tuple[int]: A tuple of 3 integers indicating BGR channels.

    Raises:
        TypeError: If ``color`` is of an unsupported type.
    """
    if is_str(color):
        # Look up a named color, e.g. 'red'.
        return Color[color].value
    if isinstance(color, Color):
        return color.value
    if isinstance(color, tuple):
        assert len(color) == 3
        assert all(0 <= channel <= 255 for channel in color)
        return color
    if isinstance(color, int):
        # A single int becomes a gray value.
        assert 0 <= color <= 255
        return (color, color, color)
    if isinstance(color, np.ndarray):
        assert color.ndim == 1 and color.size == 3
        assert np.all((color >= 0) & (color <= 255))
        return tuple(color.astype(np.uint8))
    raise TypeError(f'Invalid type for color: {type(color)}')
def choose_requirement(primary, secondary):
    """If some version of primary requirement installed, return primary, else
    return secondary."""
    # Strip any version specifier to get the bare distribution name.
    name = re.split('[!<>=]', primary)[0]
    try:
        get_distribution(name)
    except DistributionNotFound:
        return secondary
    return str(primary)
def get_version(version_file='mmcv/version.py'):
    """Read ``__version__`` from a version file without importing it.

    Args:
        version_file (str): Path of the file that assigns ``__version__``.
            Default: 'mmcv/version.py'.

    Returns:
        str: The version string.

    Raises:
        KeyError: If the file does not define ``__version__``.
    """
    # BUGFIX: execute into an explicit namespace. The original relied on
    # exec() mutating the function's locals(), which is CPython-specific
    # and no longer works under PEP 667 (Python 3.13+).
    namespace = {}
    with open(version_file, encoding='utf-8') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def parse_requirements(fname='requirements/runtime.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Recursive include of another requirements file.
            target = line.split(' ')[1]
            yield from parse_require_file(target)
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows '#egg='.
                info['package'] = line.split('#egg=')[1]
            else:
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle environment markers, e.g. "1.0;python_version<'3'".
                        version, platform_deps = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed info for every non-comment, non-empty line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    yield from parse_line(line)

    def gen_packages_items():
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # Environment markers are unsupported on Python 3.4.
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                yield ''.join(parts)

    return list(gen_packages_items())
def get_extensions():
    """Build the list of C/C++/CUDA extension modules to compile.

    The result is driven by environment variables (MMCV_WITH_TRT,
    MMCV_WITH_OPS, MMCV_WITH_ORT, FORCE_CUDA, MMCV_CUDA_ARGS,
    TENSORRT_DIR, ONNXRUNTIME_DIR) and by the module-level ``EXT_TYPE``
    ('pytorch' or 'parrots').

    Returns:
        list: Extension objects to pass to ``setup(ext_modules=...)``.
    """
    extensions = []
    if (os.getenv('MMCV_WITH_TRT', '0') != '0'):
        # ANSI escape sequences for a highlighted deprecation banner.
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += ('DeprecationWarning: ' + 'Custom TensorRT Ops will be deprecated in future. ')
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_trt'
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        tensorrt_path = os.getenv('TENSORRT_DIR', '0')
        # Pick the first matching TensorRT target lib directory.
        tensorrt_lib_path = glob.glob(os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
        library_dirs += [tensorrt_lib_path]
        libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
        libraries += ['cudart']
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
        include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
        include_dirs.append(include_path)
        include_dirs.append(include_trt_path)
        include_dirs.append(os.path.join(tensorrt_path, 'include'))
        include_dirs += include_paths(cuda=True)
        op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
        define_macros += [('MMCV_WITH_CUDA', None)]
        define_macros += [('MMCV_WITH_TRT', None)]
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
        extra_compile_args['nvcc'] += ['-Xcompiler=-fno-gnu-unique']
        library_dirs += library_paths(cuda=True)
        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)
    if (os.getenv('MMCV_WITH_OPS', '0') == '0'):
        # Ops compilation disabled: only the (optional) TRT extension.
        return extensions
    if (EXT_TYPE == 'parrots'):
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension
        define_macros = []
        include_dirs = []
        op_files = ((glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')) + glob.glob('./mmcv/ops/csrc/parrots/*.cpp'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {'nvcc': ([cuda_args, '-std=c++14'] if cuda_args else ['-std=c++14']), 'cxx': ['-std=c++14']}
        if (torch.cuda.is_available() or (os.getenv('FORCE_CUDA', '0') == '1')):
            define_macros += [('MMCV_WITH_CUDA', None)]
            extra_compile_args['nvcc'] += ['-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            cuda=True,
            pytorch=True)
        extensions.append(ext_ops)
    elif (EXT_TYPE == 'pytorch'):
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension
        # Limit parallel compilation jobs to the available CPUs (min 4).
        try:
            import psutil
            num_cpu = len(psutil.Process().cpu_affinity())
            cpu_use = max(4, (num_cpu - 1))
        except (ModuleNotFoundError, AttributeError):
            cpu_use = 4
        os.environ.setdefault('MAX_JOBS', str(cpu_use))
        define_macros = []
        extra_compile_args = {'cxx': []}
        if (platform.system() != 'Windows'):
            extra_compile_args['cxx'] = ['-std=c++14']
        include_dirs = []
        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = (True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False)
        except ImportError:
            pass
        project_dir = 'mmcv/ops/csrc/'
        if is_rocm_pytorch:
            # Translate the CUDA sources to HIP in place before building.
            from torch.utils.hipify import hipify_python
            hipify_python.hipify(project_directory=project_dir, output_directory=project_dir, includes='mmcv/ops/csrc/*', show_detailed=True, is_pytorch_extension=True)
            define_macros += [('MMCV_WITH_CUDA', None)]
            define_macros += [('HIP_DIFF', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
            op_files = (glob.glob('./mmcv/ops/csrc/pytorch/hip/*') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/hip/*'))
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/hip'))
        elif (torch.cuda.is_available() or (os.getenv('FORCE_CUDA', '0') == '1')):
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
            op_files = (((glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')) + glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu')) + glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp'))
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        else:
            # CPU-only build.
            print(f'Compiling {ext_name} without CUDA')
            op_files = (glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp'))
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        if (('nvcc' in extra_compile_args) and (platform.system() != 'Windows')):
            extra_compile_args['nvcc'] += ['-std=c++14']
        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args)
        extensions.append(ext_ops)
    if ((EXT_TYPE == 'pytorch') and (os.getenv('MMCV_WITH_ORT', '0') != '0')):
        # ANSI escape sequences for a highlighted deprecation banner.
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += ('DeprecationWarning: ' + 'Custom ONNXRuntime Ops will be deprecated in future. ')
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        ext_name = 'mmcv._ext_ort'
        import onnxruntime
        from torch.utils.cpp_extension import include_paths, library_paths
        library_dirs = []
        libraries = []
        include_dirs = []
        ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
        library_dirs += [os.path.join(ort_path, 'lib')]
        libraries.append('onnxruntime')
        define_macros = []
        extra_compile_args = {'cxx': []}
        include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
        include_dirs.append(include_path)
        include_dirs.append(os.path.join(ort_path, 'include'))
        op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
        if ((onnxruntime.get_device() == 'GPU') or (os.getenv('FORCE_CUDA', '0') == '1')):
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = ([cuda_args] if cuda_args else [])
            op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
            include_dirs += include_paths(cuda=True)
            library_dirs += library_paths(cuda=True)
        else:
            include_dirs += include_paths(cuda=False)
            library_dirs += library_paths(cuda=False)
        from setuptools import Extension
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            language='c++',
            library_dirs=library_dirs,
            libraries=libraries)
        extensions.append(ext_ops)
    return extensions
def test_quantize():
    """Check mmcv.quantize against a manual per-element reference."""
    arr = np.random.randn(10, 10)
    levels = 20

    qarr = mmcv.quantize(arr, -1, 1, levels)
    assert qarr.shape == arr.shape
    assert qarr.dtype == np.dtype('int64')
    for row in range(arr.shape[0]):
        for col in range(arr.shape[1]):
            # Clip into [-1, 1], scale to bins, cap at the top bin.
            clipped = max(min(arr[row, col], 1), -1)
            ref = min(levels - 1, int(np.floor(10 * (1 + clipped))))
            assert qarr[row, col] == ref

    qarr = mmcv.quantize(arr, -1, 1, 20, dtype=np.uint8)
    assert qarr.shape == arr.shape
    assert qarr.dtype == np.dtype('uint8')

    # Invalid arguments must raise.
    with pytest.raises(ValueError):
        mmcv.quantize(arr, -1, 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.quantize(arr, -1, 1, levels=10.0)
    with pytest.raises(ValueError):
        mmcv.quantize(arr, 2, 1, levels)
def test_dequantize():
    """Check mmcv.dequantize against a manual per-element reference."""
    levels = 20
    qarr = np.random.randint(levels, size=(10, 10))

    arr = mmcv.dequantize(qarr, -1, 1, levels)
    assert arr.shape == qarr.shape
    assert arr.dtype == np.dtype('float64')
    for row in range(qarr.shape[0]):
        for col in range(qarr.shape[1]):
            # Each bin maps back to its center value.
            assert arr[row, col] == (qarr[row, col] + 0.5) / 10 - 1

    arr = mmcv.dequantize(qarr, -1, 1, levels, dtype=np.float32)
    assert arr.shape == qarr.shape
    assert arr.dtype == np.dtype('float32')

    # Invalid arguments must raise.
    with pytest.raises(ValueError):
        mmcv.dequantize(arr, -1, 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.dequantize(arr, -1, 1, levels=10.0)
    with pytest.raises(ValueError):
        mmcv.dequantize(arr, 2, 1, levels)
def test_joint():
    """quantize followed by dequantize should roughly reconstruct the input."""
    arr = np.random.randn(100, 100)
    levels = 1000
    qarr = mmcv.quantize(arr, -1, 1, levels)
    recover = mmcv.dequantize(qarr, -1, 1, levels)
    # Out-of-range inputs saturate at the first/last bin centers.
    assert np.abs(recover[arr < -1] + 0.999).max() < 1e-06
    assert np.abs(recover[arr > 1] - 0.999).max() < 1e-06
    in_range = (arr >= -1) & (arr <= 1)
    assert np.abs((recover - arr)[in_range]).max() <= 0.001

    # Values inside the middle bin all round-trip to exactly zero.
    arr = np.clip(np.random.randn(100) / 1000, -0.01, 0.01)
    levels = 99
    qarr = mmcv.quantize(arr, -1, 1, levels)
    recover = mmcv.dequantize(qarr, -1, 1, levels)
    assert np.all(recover == 0)
def test_build_conv_layer():
    """Test build_conv_layer: invalid cfgs, the default layer, the 'Conv'
    and 'deconv' aliases, and every registered conv layer."""
    with pytest.raises(TypeError):
        # cfg must be a dict
        cfg = 'Conv2d'
        build_conv_layer(cfg)

    with pytest.raises(KeyError):
        # cfg must contain the key 'type'
        cfg = dict(kernel_size=3)
        build_conv_layer(cfg)

    with pytest.raises(KeyError):
        # cfg['type'] must be a registered layer type
        cfg = dict(type='FancyConv')
        build_conv_layer(cfg)

    kwargs = dict(in_channels=4, out_channels=8, kernel_size=3, groups=2, dilation=2)
    # cfg=None falls back to a plain nn.Conv2d.
    cfg = None
    layer = build_conv_layer(cfg, **kwargs)
    assert isinstance(layer, nn.Conv2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.groups == kwargs['groups'])
    assert (layer.dilation == (kwargs['dilation'], kwargs['dilation']))

    cfg = dict(type='Conv')
    layer = build_conv_layer(cfg, **kwargs)
    assert isinstance(layer, nn.Conv2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.groups == kwargs['groups'])
    assert (layer.dilation == (kwargs['dilation'], kwargs['dilation']))

    cfg = dict(type='deconv')
    layer = build_conv_layer(cfg, **kwargs)
    assert isinstance(layer, nn.ConvTranspose2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.groups == kwargs['groups'])
    assert (layer.dilation == (kwargs['dilation'], kwargs['dilation']))

    # Not all registered conv types accept 'groups'.
    kwargs.pop('groups')
    for (type_name, module) in CONV_LAYERS.module_dict.items():
        cfg = dict(type=type_name)
        # Sparse inverse convs do not accept 'dilation' either.
        if ((type_name == 'SparseInverseConv2d') or (type_name == 'SparseInverseConv3d')):
            kwargs.pop('dilation')
        layer = build_conv_layer(cfg, **kwargs)
        assert isinstance(layer, module)
        assert (layer.in_channels == kwargs['in_channels'])
        assert (layer.out_channels == kwargs['out_channels'])
        # Restore 'dilation' for the next iteration.
        kwargs['dilation'] = 2
def test_infer_norm_abbr():
    """infer_norm_abbr should prefer an explicit ``_abbr_`` attribute, else
    infer from the class name, falling back to 'norm_layer'."""
    with pytest.raises(TypeError):
        # only classes are accepted, not instances/values
        infer_norm_abbr(0)

    class MyNorm():
        # explicit abbreviation wins over name-based inference
        _abbr_ = 'mn'
    assert (infer_norm_abbr(MyNorm) == 'mn')

    class FancyBatchNorm():
        pass
    assert (infer_norm_abbr(FancyBatchNorm) == 'bn')

    class FancyInstanceNorm():
        pass
    assert (infer_norm_abbr(FancyInstanceNorm) == 'in')

    class FancyLayerNorm():
        pass
    assert (infer_norm_abbr(FancyLayerNorm) == 'ln')

    class FancyGroupNorm():
        pass
    assert (infer_norm_abbr(FancyGroupNorm) == 'gn')

    class FancyNorm():
        pass
    # unrecognized names fall back to the generic abbreviation
    assert (infer_norm_abbr(FancyNorm) == 'norm_layer')
def test_build_norm_layer():
    """Test build_norm_layer with invalid cfgs and all registered layers."""
    with pytest.raises(TypeError):
        # cfg must be a dict
        cfg = 'BN'
        build_norm_layer(cfg, 3)

    with pytest.raises(KeyError):
        # cfg must contain the key 'type'
        cfg = dict()
        build_norm_layer(cfg, 3)

    with pytest.raises(KeyError):
        # cfg['type'] must be a registered norm type
        cfg = dict(type='FancyNorm')
        build_norm_layer(cfg, 3)

    with pytest.raises(AssertionError):
        # postfix must be an int or str, not a list
        cfg = dict(type='BN')
        build_norm_layer(cfg, 3, postfix=[1, 2])

    with pytest.raises(AssertionError):
        # 'num_groups' is mandatory for GN
        cfg = dict(type='GN')
        build_norm_layer(cfg, 3)

    # Expected name abbreviation for each registered norm type.
    abbr_mapping = {'BN': 'bn', 'BN1d': 'bn', 'BN2d': 'bn', 'BN3d': 'bn', 'SyncBN': 'bn', 'GN': 'gn', 'LN': 'ln', 'IN': 'in', 'IN1d': 'in', 'IN2d': 'in', 'IN3d': 'in'}
    for (type_name, module) in NORM_LAYERS.module_dict.items():
        if (type_name == 'MMSyncBN'):
            # MMSyncBN requires a distributed environment; skip it.
            continue
        for postfix in ['_test', 1]:
            cfg = dict(type=type_name)
            if (type_name == 'GN'):
                cfg['num_groups'] = 2
            (name, layer) = build_norm_layer(cfg, 3, postfix=postfix)
            # The returned name is abbreviation + postfix.
            assert (name == (abbr_mapping[type_name] + str(postfix)))
            assert isinstance(layer, module)
            if (type_name == 'GN'):
                assert (layer.num_channels == 3)
                assert (layer.num_groups == cfg['num_groups'])
            elif (type_name != 'LN'):
                # LN has no num_features attribute.
                assert (layer.num_features == 3)
def test_build_activation_layer():
    """Build every registered activation layer and check Clamp/Clip output."""
    with pytest.raises(TypeError):
        # cfg must be a dict
        build_activation_layer('ReLU')
    with pytest.raises(KeyError):
        # cfg must contain the key 'type'
        build_activation_layer(dict())
    with pytest.raises(KeyError):
        # cfg['type'] must be registered
        build_activation_layer(dict(type='FancyReLU'))

    # Every registered activation layer should be buildable.
    for type_name, module in ACTIVATION_LAYERS.module_dict.items():
        layer = build_activation_layer(dict(type=type_name))
        assert isinstance(layer, module)

    # Clamp ('Clip' is an alias) should bound large inputs.
    inputs = torch.randn(10) * 1000
    act = build_activation_layer(dict(type='Clamp'))
    out = act(inputs)
    assert np.logical_and((out >= -1).numpy(), (out <= 1).numpy()).all()
    act = build_activation_layer(dict(type='Clip', min=0))
    out = act(inputs)
    assert np.logical_and((out >= 0).numpy(), (out <= 1).numpy()).all()
    act = build_activation_layer(dict(type='Clamp', max=0))
    out = act(inputs)
    assert np.logical_and((out >= -1).numpy(), (out <= 0).numpy()).all()
def test_build_padding_layer():
    """build_padding_layer: cfg validation, construction of every registered
    padding type, and output shape of a reflect pad of width 2."""
    with pytest.raises(TypeError):
        # cfg must be a dict.
        cfg = 'reflect'
        build_padding_layer(cfg)
    with pytest.raises(KeyError):
        # cfg must contain the key "type".
        cfg = dict()
        build_padding_layer(cfg)
    with pytest.raises(KeyError):
        # Unregistered padding type.
        cfg = dict(type='FancyPad')
        build_padding_layer(cfg)

    # Reuses the cfg dict from the last raises-block, replacing "type".
    for (type_name, module) in PADDING_LAYERS.module_dict.items():
        cfg['type'] = type_name
        layer = build_padding_layer(cfg, 2)
        assert isinstance(layer, module)

    # Padding of 2 on each side: 5x5 -> 9x9.
    input_x = torch.randn(1, 2, 5, 5)
    cfg = dict(type='reflect')
    padding_layer = build_padding_layer(cfg, 2)
    res = padding_layer(input_x)
    assert (res.shape == (1, 2, 9, 9))
def test_upsample_layer():
    """build_upsample_layer: cfg validation plus nearest/bilinear Upsample,
    deconv (ConvTranspose2d) via cfg, kwargs and positional args, and
    PixelShufflePack construction."""
    with pytest.raises(TypeError):
        # cfg must be a dict.
        cfg = 'bilinear'
        build_upsample_layer(cfg)
    with pytest.raises(KeyError):
        # cfg must contain the key "type".
        cfg = dict()
        build_upsample_layer(cfg)
    with pytest.raises(KeyError):
        # Unregistered upsample type.
        cfg = dict(type='FancyUpsample')
        build_upsample_layer(cfg)

    # Interpolation modes map to nn.Upsample with the matching mode.
    for type_name in ['nearest', 'bilinear']:
        cfg['type'] = type_name
        layer = build_upsample_layer(cfg)
        assert isinstance(layer, nn.Upsample)
        assert (layer.mode == type_name)

    # Deconv parameters passed inside cfg.
    cfg = dict(type='deconv', in_channels=3, out_channels=3, kernel_size=3, stride=2)
    layer = build_upsample_layer(cfg)
    assert isinstance(layer, nn.ConvTranspose2d)

    # Same parameters passed as keyword arguments.
    cfg = dict(type='deconv')
    kwargs = dict(in_channels=3, out_channels=3, kernel_size=3, stride=2)
    layer = build_upsample_layer(cfg, **kwargs)
    assert isinstance(layer, nn.ConvTranspose2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.stride == (kwargs['stride'], kwargs['stride']))

    # Same parameters passed positionally.
    layer = build_upsample_layer(cfg, 3, 3, 3, 2)
    assert isinstance(layer, nn.ConvTranspose2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.stride == (kwargs['stride'], kwargs['stride']))

    cfg = dict(type='pixel_shuffle', in_channels=3, out_channels=3, scale_factor=2, upsample_kernel=3)
    layer = build_upsample_layer(cfg)
    assert isinstance(layer, PixelShufflePack)
    assert (layer.scale_factor == 2)
    assert (layer.upsample_kernel == 3)
def test_pixel_shuffle_pack():
    """PixelShufflePack builds an upsample conv with the requested kernel and
    doubles spatial resolution (scale_factor=2) while keeping N and C."""
    layer = PixelShufflePack(3, 3, scale_factor=2, upsample_kernel=3)
    assert layer.upsample_conv.kernel_size == (3, 3)
    inputs = torch.rand(2, 3, 10, 10)
    outputs = layer(inputs)
    # 10x10 input upsampled by 2 -> 20x20.
    assert outputs.shape == (2, 3, 20, 20)
def test_is_norm():
    """is_norm: recognizes all builtin norm layers (and subclasses), honors
    the ``exclude`` filter, and rejects non-class exclude arguments."""
    # Norms constructed with a single num_features argument.
    norm_set1 = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm]
    # GroupNorm needs (num_groups, num_channels).
    norm_set2 = [nn.GroupNorm]
    for norm_type in norm_set1:
        layer = norm_type(3)
        assert is_norm(layer)
        # Excluding the exact type must flip the result.
        assert (not is_norm(layer, exclude=(norm_type,)))
    for norm_type in norm_set2:
        layer = norm_type(3, 6)
        assert is_norm(layer)
        assert (not is_norm(layer, exclude=(norm_type,)))

    # Subclasses are excluded via their base class too.
    class MyNorm(nn.BatchNorm2d):
        pass
    layer = MyNorm(3)
    assert is_norm(layer)
    assert (not is_norm(layer, exclude=_BatchNorm))
    assert (not is_norm(layer, exclude=(_BatchNorm,)))

    # A conv layer is not a norm.
    layer = nn.Conv2d(3, 8, 1)
    assert (not is_norm(layer))

    # exclude must be a type or tuple of types, not a string.
    with pytest.raises(TypeError):
        layer = nn.BatchNorm1d(3)
        is_norm(layer, exclude='BN')
    with pytest.raises(TypeError):
        layer = nn.BatchNorm1d(3)
        is_norm(layer, exclude=('BN',))
def test_infer_plugin_abbr():
    """infer_plugin_abbr: explicit ``_abbr_`` wins; otherwise the class name
    is converted to snake_case."""
    # Only classes are accepted.
    with pytest.raises(TypeError):
        infer_plugin_abbr(0)

    class MyPlugin():
        _abbr_ = 'mp'
    assert (infer_plugin_abbr(MyPlugin) == 'mp')

    # CamelCase class name -> snake_case abbreviation.
    class FancyPlugin():
        pass
    assert (infer_plugin_abbr(FancyPlugin) == 'fancy_plugin')
def test_build_plugin_layer():
    """build_plugin_layer: cfg validation and (name, layer) construction for
    ContextBlock, GeneralizedAttention, NonLocal2d and ConvModule plugins."""
    with pytest.raises(TypeError):
        # cfg must be a dict.
        cfg = 'Plugin'
        build_plugin_layer(cfg)
    with pytest.raises(KeyError):
        # cfg must contain the key "type".
        cfg = dict()
        build_plugin_layer(cfg)
    with pytest.raises(KeyError):
        # Unregistered plugin type.
        cfg = dict(type='FancyPlugin')
        build_plugin_layer(cfg)
    with pytest.raises(AssertionError):
        # postfix must be int or str.
        cfg = dict(type='ConvModule')
        build_plugin_layer(cfg, postfix=[1, 2])

    # ContextBlock plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='ContextBlock')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16, ratio=(1.0 / 4))
        assert (name == ('context_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['ContextBlock'])

    # GeneralizedAttention plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='GeneralizedAttention')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16)
        assert (name == ('gen_attention_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['GeneralizedAttention'])

    # NonLocal2d plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='NonLocal2d')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16)
        assert (name == ('nonlocal_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['NonLocal2d'])

    # ConvModule plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='ConvModule')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16, out_channels=4, kernel_size=3)
        assert (name == ('conv_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['ConvModule'])
def test_context_block():
    """ContextBlock: argument validation plus shape preservation for both
    pooling types and all fusion-type combinations."""
    with pytest.raises(AssertionError):
        ContextBlock(16, (1.0 / 4), pooling_type='unsupport_type')
    with pytest.raises(AssertionError):
        # fusion_types must be a sequence, not a bare string.
        ContextBlock(16, (1.0 / 4), fusion_types='unsupport_type')
    with pytest.raises(AssertionError):
        ContextBlock(16, (1.0 / 4), fusion_types=('unsupport_type',))

    # Attention pooling: uses a 1-channel conv mask.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), pooling_type='att')
    out = context_block(imgs)
    assert (context_block.conv_mask.in_channels == 16)
    assert (context_block.conv_mask.out_channels == 1)
    assert (out.shape == imgs.shape)

    # Average pooling branch.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), pooling_type='avg')
    out = context_block(imgs)
    assert hasattr(context_block, 'avg_pool')
    assert (out.shape == imgs.shape)

    # Only the additive fusion branch is built.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_add',))
    out = context_block(imgs)
    assert (context_block.channel_add_conv is not None)
    assert (context_block.channel_mul_conv is None)
    assert (out.shape == imgs.shape)

    # Only the multiplicative fusion branch is built.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_mul',))
    out = context_block(imgs)
    assert (context_block.channel_add_conv is None)
    assert (context_block.channel_mul_conv is not None)
    assert (out.shape == imgs.shape)

    # Both fusion branches at once.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_add', 'channel_mul'))
    out = context_block(imgs)
    assert (context_block.channel_add_conv is not None)
    assert (context_block.channel_mul_conv is not None)
    assert (out.shape == imgs.shape)
def test_conv2d_samepadding():
    """Conv2dAdaptivePadding ("same"-style padding): stride 1 keeps the input
    size; stride 2 halves it with ceil rounding."""
    # stride=1, even input size.
    inputs = torch.rand((1, 3, 28, 28))
    conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1)
    output = conv(inputs)
    assert (output.shape == inputs.shape)

    # stride=1, odd input size.
    inputs = torch.rand((1, 3, 13, 13))
    conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1)
    output = conv(inputs)
    assert (output.shape == inputs.shape)

    # stride=2: 28 -> 14.
    inputs = torch.rand((1, 3, 28, 28))
    conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2)
    output = conv(inputs)
    assert (output.shape == torch.Size([1, 3, 14, 14]))

    # stride=2 with odd input: 13 -> ceil(13/2) = 7.
    inputs = torch.rand((1, 3, 13, 13))
    conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2)
    output = conv(inputs)
    assert (output.shape == torch.Size([1, 3, 7, 7]))
@CONV_LAYERS.register_module()
class ExampleConv(nn.Module):
    """Custom conv layer registered in CONV_LAYERS so ConvModule can build it
    via ``conv_cfg=dict(type='ExampleConv')``.

    Mimics the attribute surface of a conv layer (in/out channels, stride,
    padding, etc.) but internally just applies a plain Conv2d whose weights
    are zero-initialized, making its output trivially checkable in tests.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, norm_cfg=None):
        super(ExampleConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.norm_cfg = norm_cfg
        # Attributes expected of conv-like layers by downstream code.
        self.output_padding = (0, 0, 0)
        self.transposed = False
        self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size)
        self.init_weights()

    def forward(self, x):
        x = self.conv0(x)
        return x

    def init_weights(self):
        # Zero weights give a deterministic (all-bias) output for testing.
        nn.init.constant_(self.conv0.weight, 0)
def test_conv_module():
    """ConvModule: cfg validation, conv/norm/activation wiring, custom conv
    layers, spectral norm, padding modes and all supported activations."""
    with pytest.raises(AssertionError):
        # conv_cfg must be a dict or None.
        conv_cfg = 'conv'
        ConvModule(3, 8, 2, conv_cfg=conv_cfg)
    with pytest.raises(AssertionError):
        # norm_cfg must be a dict or None.
        norm_cfg = 'norm'
        ConvModule(3, 8, 2, norm_cfg=norm_cfg)
    with pytest.raises(KeyError):
        # softmax is not a registered activation.
        act_cfg = dict(type='softmax')
        ConvModule(3, 8, 2, act_cfg=act_cfg)

    # conv + norm + act
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert conv.with_norm
    assert hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # conv + act (no norm)
    conv = ConvModule(3, 8, 2)
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert (not conv.with_norm)
    assert (conv.norm is None)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # conv only
    conv = ConvModule(3, 8, 2, act_cfg=None)
    assert (not conv.with_norm)
    assert (conv.norm is None)
    assert (not conv.with_activation)
    assert (not hasattr(conv, 'activate'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # Custom conv layer from the registry (ExampleConv zero-inits weights).
    conv_module = ConvModule(3, 8, 2, conv_cfg=dict(type='ExampleConv'), act_cfg=None)
    assert torch.equal(conv_module.conv.conv0.weight, torch.zeros(8, 3, 2, 2))

    # Spectral norm wraps the conv weight.
    conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    # Non-default padding mode builds an explicit padding layer.
    conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    with pytest.raises(KeyError):
        # Unknown padding mode.
        conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')

    # Non-default activations.
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert isinstance(conv.activate, nn.LeakyReLU)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Tanh'))
    assert isinstance(conv.activate, nn.Tanh)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Sigmoid'))
    assert isinstance(conv.activate, nn.Sigmoid)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='PReLU'))
    assert isinstance(conv.activate, nn.PReLU)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    # HSwish maps to nn.Hardswish on torch >= 1.7, custom HSwish otherwise.
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSwish'))
    if ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.7'))):
        assert isinstance(conv.activate, HSwish)
    else:
        assert isinstance(conv.activate, nn.Hardswish)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSigmoid'))
    assert isinstance(conv.activate, HSigmoid)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
def test_bias():
    """ConvModule bias='auto': bias is enabled without norm, disabled with a
    batch/instance norm, and forcing it on alongside BN/IN warns."""
    # No norm -> bias defaults on.
    conv = ConvModule(3, 8, 2)
    assert (conv.conv.bias is not None)
    # BN supplies its own shift, so the conv bias is dropped automatically.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert (conv.conv.bias is None)
    # Explicit bias=False always wins.
    conv = ConvModule(3, 8, 2, bias=False)
    assert (conv.conv.bias is None)

    # Forcing bias=True together with BN/IN triggers exactly one warning.
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='BN'))
    assert (len(record) == 1)
    assert (record[0].message.args[0] == 'Unnecessary conv bias before batch/instance norm')
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='IN'))
    assert (len(record) == 1)
    assert (record[0].message.args[0] == 'Unnecessary conv bias before batch/instance norm')

    # GN does not warn: the manual warning is the only one recorded.
    with pytest.warns(UserWarning) as record:
        norm_cfg = dict(type='GN', num_groups=1)
        ConvModule(3, 8, 2, bias=True, norm_cfg=norm_cfg)
        warnings.warn('No warnings')
    assert (len(record) == 1)
    assert (record[0].message.args[0] == 'No warnings')
def conv_forward(self, x):
    """Patched stand-in for ``nn.Conv2d.forward``: tags the input string so
    test_order can trace layer execution order."""
    tag = '_conv'
    return x + tag
def bn_forward(self, x):
    """Patched stand-in for ``nn.BatchNorm2d.forward``: tags the input string
    so test_order can trace layer execution order."""
    tag = '_bn'
    return x + tag
def relu_forward(self, x):
    """Patched stand-in for ``nn.ReLU.forward``: tags the input string so
    test_order can trace layer execution order."""
    tag = '_relu'
    return x + tag
@patch('torch.nn.ReLU.forward', relu_forward)
@patch('torch.nn.BatchNorm2d.forward', bn_forward)
@patch('torch.nn.Conv2d.forward', conv_forward)
def test_order():
    """ConvModule ``order`` option: layer forwards are patched to append tags,
    so the output string records the exact execution order."""
    with pytest.raises(AssertionError):
        # order must be a tuple, not a list.
        order = ['conv', 'norm', 'act']
        ConvModule(3, 8, 2, order=order)
    with pytest.raises(AssertionError):
        # order must contain all three components.
        order = ('conv', 'norm')
        ConvModule(3, 8, 2, order=order)
    with pytest.raises(AssertionError):
        # No duplicates allowed.
        order = ('conv', 'norm', 'norm')
        ConvModule(3, 8, 2, order=order)
    with pytest.raises(AssertionError):
        # Only conv/norm/act are valid components.
        order = ('conv', 'norm', 'something')
        ConvModule(3, 8, 2, order=order)

    # Default order: conv -> norm -> act.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input')
    assert (out == 'input_conv_bn_relu')

    # Custom order: norm -> conv -> act.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'), order=('norm', 'conv', 'act'))
    out = conv('input')
    assert (out == 'input_bn_conv_relu')

    # Per-call switches skip activation / norm.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input', activate=False)
    assert (out == 'input_conv_bn')
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input', norm=False)
    assert (out == 'input_conv_relu')
def test_depthwise_separable_conv():
    """DepthwiseSeparableConvModule: depthwise (groups == in_channels) + 1x1
    pointwise conv, with independent dw_/pw_ norm and activation configs."""
    with pytest.raises(AssertionError):
        # groups is fixed internally; callers must not pass it.
        DepthwiseSeparableConvModule(4, 8, 2, groups=2)

    # Defaults: ReLU on both convs, no norm.
    conv = DepthwiseSeparableConvModule(3, 8, 2)
    assert (conv.depthwise_conv.conv.groups == 3)
    assert (conv.pointwise_conv.conv.kernel_size == (1, 1))
    assert (not conv.depthwise_conv.with_norm)
    assert (not conv.pointwise_conv.with_norm)
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # Norm only on the depthwise conv.
    conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (not conv.pointwise_conv.with_norm)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # Norm only on the pointwise conv.
    conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN'))
    assert (not conv.depthwise_conv.with_norm)
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # Shared norm_cfg applies to both convs.
    conv = DepthwiseSeparableConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # Custom layer order is forwarded to both ConvModules.
    conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))

    # Spectral norm wraps both conv weights.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.depthwise_conv.conv, 'weight_orig')
    assert hasattr(conv.pointwise_conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    # Padding mode is forwarded.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    # Activation only on the depthwise conv.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    # Activation only on the pointwise conv.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))

    # Shared act_cfg applies to both convs.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
class ExampleModel(nn.Module):
    """Toy model for the FLOPs-counter test.

    ``forward`` receives a *shape tuple* (not a tensor): it synthesizes a
    random batch of one sample of that shape and runs it through a single
    3x3 conv. This matches the ``input_constructor`` calling convention of
    ``get_model_complexity_info``.
    """

    def __init__(self):
        super().__init__()
        self.conv2d = nn.Conv2d(3, 8, 3)

    def forward(self, imgs):
        # ``imgs`` is a (C, H, W) spec; prepend a batch dim of 1.
        batch = torch.randn((1, *imgs))
        return self.conv2d(batch)
def input_constructor(x):
    """Wrap an input-shape spec into the kwargs dict that ExampleModel.forward
    expects (``imgs=<shape tuple>``)."""
    return {'imgs': x}
def test_flops_counter():
    """get_model_complexity_info: input validation, ground-truth FLOPs/params
    for known models, input_constructor support, string formatting and
    per-layer printing."""
    with pytest.raises(AssertionError):
        # Input resolution must be a tuple, not a list.
        model = nn.Conv2d(3, 8, 3)
        input_res = [1, 3, 16, 16]
        get_model_complexity_info(model, input_res)
    with pytest.raises(AssertionError):
        # The tuple must be non-empty.
        model = nn.Conv2d(3, 8, 3)
        input_res = tuple()
        get_model_complexity_info(model, input_res)

    # gt_results is a module-level fixture of (model, input, flops, params).
    for item in gt_results:
        model = item['model']
        input = item['input']
        (flops, params) = get_model_complexity_info(model, input, as_strings=False, print_per_layer_stat=False)
        assert ((flops == item['flops']) and (params == item['params']))

    # Custom input_constructor builds the forward kwargs from the shape spec.
    model = ExampleModel()
    x = (3, 16, 16)
    (flops, params) = get_model_complexity_info(model, x, as_strings=False, print_per_layer_stat=False, input_constructor=input_constructor)
    assert ((flops == 43904.0) and (params == 224.0))

    # Default as_strings=True returns human-readable strings.
    model = nn.Conv3d(3, 8, 3)
    x = (3, 3, 512, 512)
    (flops, params) = get_model_complexity_info(model, x, print_per_layer_stat=False)
    assert ((flops == '0.17 GFLOPs') and (params == str(656)))

    # Per-layer statistics are written to the given output stream.
    model = nn.Conv1d(3, 8, 3)
    x = (3, 16)
    out = StringIO()
    get_model_complexity_info(model, x, ost=out)
    assert (out.getvalue() == 'Conv1d(0.0 M, 100.000% Params, 0.0 GFLOPs, 100.000% FLOPs, 3, 8, kernel_size=(3,), stride=(1,))\n')

    # nn.Flatten contributes zero FLOPs/params.
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(), nn.Linear(1568, 2))
    x = (3, 16, 16)
    (flops, params) = get_model_complexity_info(model, x, as_strings=False, print_per_layer_stat=True)
    assert ((flops == 47040.0) and (params == 3362))
def test_flops_to_string():
    """flops_to_string: explicit unit selection, precision control, and
    automatic unit choice when units=None."""
    flops = (6.54321 * (10.0 ** 9))
    assert (flops_to_string(flops) == '6.54 GFLOPs')
    assert (flops_to_string(flops, 'MFLOPs') == '6543.21 MFLOPs')
    assert (flops_to_string(flops, 'KFLOPs') == '6543210.0 KFLOPs')
    assert (flops_to_string(flops, 'FLOPs') == '6543210000.0 FLOPs')
    assert (flops_to_string(flops, precision=4) == '6.5432 GFLOPs')

    # units=None picks the largest unit that fits the magnitude.
    flops = (6.54321 * (10.0 ** 9))
    assert (flops_to_string(flops, None) == '6.54 GFLOPs')
    flops = (3.21 * (10.0 ** 7))
    assert (flops_to_string(flops, None) == '32.1 MFLOPs')
    flops = (5.4 * (10.0 ** 3))
    assert (flops_to_string(flops, None) == '5.4 KFLOPs')
    flops = 987
    assert (flops_to_string(flops, None) == '987 FLOPs')
def test_params_to_string():
    """params_to_string: automatic unit choice, explicit units, and precision
    control."""
    # Automatic unit selection by magnitude.
    num_params = (3.21 * (10.0 ** 7))
    assert (params_to_string(num_params) == '32.1 M')
    num_params = (4.56 * (10.0 ** 5))
    assert (params_to_string(num_params) == '456.0 k')
    num_params = (7.89 * (10.0 ** 2))
    assert (params_to_string(num_params) == '789.0')

    # Explicit units and precision.
    num_params = (6.54321 * (10.0 ** 7))
    assert (params_to_string(num_params, 'M') == '65.43 M')
    assert (params_to_string(num_params, 'K') == '65432.1 K')
    assert (params_to_string(num_params, '') == '65432100.0')
    assert (params_to_string(num_params, precision=4) == '65.4321 M')
def test_fuse_conv_bn():
    """fuse_conv_bn: folding BN into the preceding conv must leave the
    network output unchanged."""
    inputs = torch.rand((1, 3, 5, 5))
    modules = nn.ModuleList()
    # A standalone BN (no preceding conv) plus two conv+BN pairs.
    modules.append(nn.BatchNorm2d(3))
    modules.append(ConvModule(3, 5, 3, norm_cfg=dict(type='BN')))
    modules.append(ConvModule(5, 5, 3, norm_cfg=dict(type='BN')))
    modules = nn.Sequential(*modules)
    fused_modules = fuse_conv_bn(modules)
    # Exact equality is expected since fusion is algebraically exact here.
    assert torch.equal(modules(inputs), fused_modules(inputs))
def test_generalized_attention():
    """GeneralizedAttention: every attention_type configuration preserves the
    input shape; also exercises spatial_range, q_stride, kv_stride and the
    fp16 CUDA path.

    Renamed from ``test_context_block``: that name duplicated the earlier
    ContextBlock test in this module, so the later definition shadowed the
    earlier one and pytest collected only one of the two.
    """
    # attention_type '1000': query/key content term.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='1000')
    assert (gen_attention_block.query_conv.in_channels == 16)
    assert (gen_attention_block.key_conv.in_channels == 16)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # attention_type '0100': query/relative-position term.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0100')
    assert (gen_attention_block.query_conv.in_channels == 16)
    assert (gen_attention_block.appr_geom_fc_x.in_features == 8)
    assert (gen_attention_block.appr_geom_fc_y.in_features == 8)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # attention_type '0010': key/appearance-bias term.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0010')
    assert (gen_attention_block.key_conv.in_channels == 16)
    assert hasattr(gen_attention_block, 'appr_bias')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # attention_type '0001': relative-position/geometry-bias term.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0001')
    assert (gen_attention_block.appr_geom_fc_x.in_features == 8)
    assert (gen_attention_block.appr_geom_fc_y.in_features == 8)
    assert hasattr(gen_attention_block, 'geom_bias')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # A positive spatial_range builds the local constraint map.
    imgs = torch.randn(2, 256, 20, 20)
    gen_attention_block = GeneralizedAttention(256, spatial_range=10)
    assert hasattr(gen_attention_block, 'local_constraint_map')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # Query downsampling.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, q_stride=2)
    assert (gen_attention_block.q_downsample is not None)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # Key/value downsampling.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, kv_stride=2)
    assert (gen_attention_block.kv_downsample is not None)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)

    # Half-precision forward on GPU, all terms enabled.
    if torch.cuda.is_available():
        imgs = torch.randn(2, 16, 20, 20).cuda().to(torch.half)
        gen_attention_block = GeneralizedAttention(16, spatial_range=(- 1), num_heads=8, attention_type='1111', kv_stride=2)
        gen_attention_block.cuda().type(torch.half)
        out = gen_attention_block(imgs)
        assert (out.shape == imgs.shape)
def test_hsigmoid():
    """HSigmoid: clamp((x + bias) / divisor, min, max) — checked against a
    manually computed reference for default and custom parameters."""
    # divisor must be non-zero.
    with pytest.raises(AssertionError):
        HSigmoid(divisor=0)

    # Defaults: bias=3, divisor=6, clamped to [0, 1]... NOTE(review): the
    # reference below uses (x + 3) / 6, matching those defaults.
    act = HSigmoid()
    input_shape = torch.Size([1, 3, 64, 64])
    input = torch.randn(input_shape)
    output = act(input)
    expected_output = torch.min(torch.max(((input + 3) / 6), torch.zeros(input_shape)), torch.ones(input_shape))
    assert (output.shape == expected_output.shape)
    assert torch.equal(output, expected_output)

    # Custom parameters: positional (bias=1, divisor=2, min=0, max=1)
    # — presumably matching HSigmoid's signature order; reference is
    # (x + 1) / 2 clamped to [0, 1].
    act = HSigmoid(1, 2, 0, 1)
    input_shape = torch.Size([1, 3, 64, 64])
    input = torch.randn(input_shape)
    output = act(input)
    expected_output = torch.min(torch.max(((input + 1) / 2), torch.zeros(input_shape)), torch.ones(input_shape))
    assert (output.shape == expected_output.shape)
    assert torch.equal(output, expected_output)
def test_hswish():
    """HSwish: x * relu6(x + 3) / 6, with an ``inplace`` flag forwarded to
    the internal activation."""
    act = HSwish(inplace=True)
    assert act.act.inplace
    act = HSwish()
    assert (not act.act.inplace)

    input = torch.randn(1, 3, 64, 64)
    expected_output = ((input * relu6((input + 3))) / 6)
    output = act(input)
    assert (output.shape == expected_output.shape)
    assert torch.equal(output, expected_output)
def test_build_model_from_cfg():
    """build_model_from_cfg as a Registry build_func: builds a single module
    from a dict, an nn.Sequential from a list of dicts, and is inherited by
    child registries unless overridden."""
    BACKBONES = mmcv.Registry('backbone', build_func=build_model_from_cfg)

    @BACKBONES.register_module()
    class ResNet(nn.Module):

        def __init__(self, depth, stages=4):
            super().__init__()
            self.depth = depth
            self.stages = stages

        def forward(self, x):
            return x

    @BACKBONES.register_module()
    class ResNeXt(nn.Module):

        def __init__(self, depth, stages=4):
            super().__init__()
            self.depth = depth
            self.stages = stages

        def forward(self, x):
            return x

    # A single cfg dict builds one module, default kwargs applied.
    cfg = dict(type='ResNet', depth=50)
    model = BACKBONES.build(cfg)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    cfg = dict(type='ResNeXt', depth=50, stages=3)
    model = BACKBONES.build(cfg)
    assert isinstance(model, ResNeXt)
    assert ((model.depth == 50) and (model.stages == 3))

    # A list of cfgs builds an nn.Sequential in order.
    cfg = [dict(type='ResNet', depth=50), dict(type='ResNeXt', depth=50, stages=3)]
    model = BACKBONES.build(cfg)
    assert isinstance(model, nn.Sequential)
    assert isinstance(model[0], ResNet)
    assert ((model[0].depth == 50) and (model[0].stages == 4))
    assert isinstance(model[1], ResNeXt)
    assert ((model[1].depth == 50) and (model[1].stages == 3))

    # Child registries inherit the parent's build_func by default.
    NEW_MODELS = mmcv.Registry('models', parent=MODELS, scope='new')
    assert (NEW_MODELS.build_func is build_model_from_cfg)

    # ...unless an explicit build_func overrides it.
    def pseudo_build(cfg):
        return cfg
    NEW_MODELS = mmcv.Registry('models', parent=MODELS, build_func=pseudo_build)
    assert (NEW_MODELS.build_func is pseudo_build)
def test_nonlocal():
    """_NonLocalNd base class: rejects unknown modes and constructs with all
    combinations of norm_cfg and zeros_init."""
    with pytest.raises(ValueError):
        # Only embedded_gaussian/dot_product/concatenation/gaussian are valid.
        _NonLocalNd(3, mode='unsupport_mode')

    # Smoke-test construction paths (no forward, base class only).
    _NonLocalNd(3)
    _NonLocalNd(3, norm_cfg=dict(type='BN'))
    _NonLocalNd(3, zeros_init=False)
    _NonLocalNd(3, norm_cfg=dict(type='BN'), zeros_init=False)
def test_nonlocal3d():
    """NonLocal3d: shape preservation for every mode, and sub_sample wiring
    (MaxPool3d with (1, 2, 2) kernel on g/phi).

    The ``parrots`` branches move data to GPU because parrots requires CUDA
    for these ops; on stock PyTorch the test runs on CPU.
    """
    # Default mode (embedded_gaussian).
    imgs = torch.randn(2, 3, 10, 20, 20)
    nonlocal_3d = NonLocal3d(3)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: dot_product.
    nonlocal_3d = NonLocal3d(3, mode='dot_product')
    assert (nonlocal_3d.mode == 'dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: concatenation.
    nonlocal_3d = NonLocal3d(3, mode='concatenation')
    assert (nonlocal_3d.mode == 'concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: gaussian — no phi projection is built.
    nonlocal_3d = NonLocal3d(3, mode='gaussian')
    assert (not hasattr(nonlocal_3d, 'phi'))
    assert (nonlocal_3d.mode == 'gaussian')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)

    # gaussian + sub_sample: g gains a pooling stage; phi is pure pooling.
    nonlocal_3d = NonLocal3d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_3d.g, nn.Sequential) and (len(nonlocal_3d.g) == 2))
    assert isinstance(nonlocal_3d.g[1], nn.MaxPool3d)
    assert (nonlocal_3d.g[1].kernel_size == (1, 2, 2))
    assert isinstance(nonlocal_3d.phi, nn.MaxPool3d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)

    # dot_product + sub_sample: both g and phi gain a pooling stage.
    nonlocal_3d = NonLocal3d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_3d.g, nonlocal_3d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool3d)
        assert (m[1].kernel_size == (1, 2, 2))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
def test_nonlocal2d():
    """NonLocal2d: shape preservation for every mode, and sub_sample wiring
    (MaxPool2d with (2, 2) kernel on g/phi). Parrots branches as in the 3d
    test."""
    # Default mode (embedded_gaussian).
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: dot_product.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: concatenation.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: gaussian — no phi projection is built.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='gaussian')
    assert (not hasattr(nonlocal_2d, 'phi'))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)

    # gaussian + sub_sample.
    nonlocal_2d = NonLocal2d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_2d.g, nn.Sequential) and (len(nonlocal_2d.g) == 2))
    assert isinstance(nonlocal_2d.g[1], nn.MaxPool2d)
    assert (nonlocal_2d.g[1].kernel_size == (2, 2))
    assert isinstance(nonlocal_2d.phi, nn.MaxPool2d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)

    # dot_product + sub_sample.
    nonlocal_2d = NonLocal2d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_2d.g, nonlocal_2d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool2d)
        assert (m[1].kernel_size == (2, 2))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
def test_nonlocal1d():
    """NonLocal1d: shape preservation for every mode, and sub_sample wiring
    (MaxPool1d with kernel 2 on g/phi). Parrots branches as in the 3d
    test."""
    # Default mode (embedded_gaussian).
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: dot_product.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: concatenation.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)

    # Mode: gaussian — no phi projection is built.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='gaussian')
    assert (not hasattr(nonlocal_1d, 'phi'))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)

    # gaussian + sub_sample.
    nonlocal_1d = NonLocal1d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_1d.g, nn.Sequential) and (len(nonlocal_1d.g) == 2))
    assert isinstance(nonlocal_1d.g[1], nn.MaxPool1d)
    assert (nonlocal_1d.g[1].kernel_size == 2)
    assert isinstance(nonlocal_1d.phi, nn.MaxPool1d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)

    # dot_product + sub_sample.
    nonlocal_1d = NonLocal1d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_1d.g, nonlocal_1d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool1d)
        assert (m[1].kernel_size == 2)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
def test_revert_syncbn():
    """revert_sync_batchnorm: SyncBN cannot run without a process group
    (raises ValueError on plain CPU), but works after being reverted to a
    regular BN."""
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
    x = torch.randn(1, 3, 10, 10)
    with pytest.raises(ValueError):
        # SyncBN forward requires an initialized distributed environment.
        y = conv(x)
    conv = revert_sync_batchnorm(conv)
    y = conv(x)
    assert (y.shape == (1, 8, 9, 9))
def test_revert_mmsyncbn():
    """revert_sync_batchnorm with MMSyncBN: reverted BN must match the
    MMSyncBN output (in eval mode) on GPU and on CPU.

    Requires a multi-process SLURM + NCCL environment; silently returns
    (skips) when not launched with >= 2 SLURM tasks.
    """
    if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) < 2)):
        print('Must run on slurm with more than 1 process!\nsrun -p test --gres=gpu:2 -n2')
        return
    rank = int(os.environ['SLURM_PROCID'])
    world_size = int(os.environ['SLURM_NTASKS'])
    local_rank = int(os.environ['SLURM_LOCALID'])
    node_list = str(os.environ['SLURM_NODELIST'])
    # Extract the master node's IPv4 address from the SLURM node list.
    node_parts = re.findall('[0-9]+', node_list)
    os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' + f'.{node_parts[3]}.{node_parts[4]}')
    os.environ['MASTER_PORT'] = '12341'
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)
    dist.init_process_group('nccl')
    torch.cuda.set_device(local_rank)
    # Broadcast the same input to all ranks so outputs are comparable.
    x = torch.randn(1, 3, 10, 10).cuda()
    dist.broadcast(x, src=0)
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='MMSyncBN')).cuda()
    # eval mode: BN uses running stats, so sync vs. plain BN should agree.
    conv.eval()
    y_mmsyncbn = conv(x).detach().cpu().numpy()
    conv = revert_sync_batchnorm(conv)
    y_bn = conv(x).detach().cpu().numpy()
    assert np.all(np.isclose(y_bn, y_mmsyncbn, 0.001))
    # The reverted module must also produce consistent results on CPU.
    (conv, x) = (conv.to('cpu'), x.to('cpu'))
    y_bn_cpu = conv(x).detach().numpy()
    assert np.all(np.isclose(y_bn, y_bn_cpu, 0.001))
def test_scale():
    """Scale: a learnable scalar multiplier, default 1.0, that preserves the
    input shape."""
    # Default scale of 1.0.
    scale = Scale()
    assert (scale.scale.data == 1.0)
    assert (scale.scale.dtype == torch.float)
    x = torch.rand(1, 3, 64, 64)
    output = scale(x)
    assert (output.shape == (1, 3, 64, 64))

    # Custom initial scale.
    scale = Scale(10.0)
    assert (scale.scale.data == 10.0)
    assert (scale.scale.dtype == torch.float)
    x = torch.rand(1, 3, 64, 64)
    output = scale(x)
    assert (output.shape == (1, 3, 64, 64))
def test_swish():
    """Swish must compute ``x * sigmoid(x)`` elementwise, preserving shape
    and matching the reference exactly."""
    act = Swish()
    input = torch.randn(1, 3, 64, 64)
    # torch.sigmoid replaces the deprecated F.sigmoid (deprecated since
    # PyTorch 1.0; it emits a UserWarning and just forwards to
    # torch.sigmoid, so the reference values are unchanged).
    expected_output = (input * torch.sigmoid(input))
    output = act(input)
    assert (output.shape == expected_output.shape)
    assert torch.equal(output, expected_output)
def test_adaptive_padding():
    """AdaptivePadding: 'same' and 'corner' modes pad inputs so they are
    evenly covered by the given kernel/stride/dilation; other padding values
    are rejected."""
    for padding in ('same', 'corner'):
        # Scalar kernel/stride: pad 15x17 up to multiples of 16.
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        input = torch.rand(1, 1, 16, 17)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))

        # Tuple kernel/stride: odd sizes are rounded up to even coverage.
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (12, 14))

        # Stride larger than the remaining extent: no padding needed.
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (10, 13))

        # Large kernel (stride still (10, 10) from above).
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (21, 21))

        # A dilated kernel pads like its equivalent effective kernel:
        # dilation (2, 2) on a (4, 5) kernel == a (7, 9) kernel.
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        dilation_out = adap_pad(input)
        assert ((dilation_out.shape[2], dilation_out.shape[3]) == (16, 21))
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        kernel79_out = adap_pad(input)
        assert ((kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21))
        assert (kernel79_out.shape == dilation_out.shape)

    # Only 'same'/'corner' are accepted.
    with pytest.raises(AssertionError):
        AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=1)
def test_patch_embed():
    # Basic NCHW -> (B, L, C) projection with a 3x3 conv, stride 1.
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_1 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=1, norm_cfg=None)
    (x1, shape) = patch_merge_1(dummy_input)
    assert (x1.shape == (2, 2, 10))
    assert (shape == (1, 2))
    # The sequence length equals the number of output patches.
    assert ((shape[0] * shape[1]) == x1.shape[1])
    # Dilated 5x5 kernel with stride 2 reduces 10x10 to one patch.
    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_2 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=None)
    (x2, shape) = patch_merge_2(dummy_input)
    assert (x2.shape == (2, 1, 10))
    assert (shape == (1, 1))
    assert ((shape[0] * shape[1]) == x2.shape[1])
    # With input_size given, init_out_size must match the conv output
    # formula floor((in - dilation*(k-1) - 1) / stride) + 1.
    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size)
    (x3, shape) = patch_merge_3(dummy_input)
    assert (x3.shape == (2, 1, 10))
    assert (shape == (1, 1))
    assert ((shape[0] * shape[1]) == x3.shape[1])
    assert (patch_merge_3.init_out_size[1] == ((((input_size[0] - (2 * 4)) - 1) // 2) + 1))
    assert (patch_merge_3.init_out_size[0] == ((((input_size[0] - (2 * 4)) - 1) // 2) + 1))
    # Non-square input: runtime output size must agree with init_out_size.
    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size)
    (_, shape) = patch_merge_3(dummy_input)
    assert (shape == patch_merge_3.init_out_size)
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size)
    (_, shape) = patch_merge_3(dummy_input)
    assert (shape == patch_merge_3.init_out_size)
    # Adaptive padding modes routed through PatchEmbed.
    for padding in ('same', 'corner'):
        in_c = 2
        embed_dims = 3
        B = 2
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 25, 3))
        assert (out_size == (5, 5))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 1, 3))
        assert (out_size == (1, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 2, 3))
        assert (out_size == (2, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 3, 3))
        assert (out_size == (1, 3))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
def test_patch_merging():
    # Merging with scalar kernel/stride/padding: (B, L, C) in and out.
    in_c = 3
    out_c = 4
    kernel_size = 3
    stride = 3
    padding = 1
    dilation = 1
    bias = False
    patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
    (B, L, C) = (1, 100, 3)
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    (x_out, out_size) = patch_merge(x, input_size)
    assert (x_out.size() == (1, 16, 4))
    assert (out_size == (4, 4))
    # The merged sequence length equals the product of the output size.
    assert (x_out.size(1) == (out_size[0] * out_size[1]))
    # Dilated merging kernel.
    in_c = 4
    out_c = 5
    kernel_size = 6
    stride = 3
    padding = 2
    dilation = 2
    bias = False
    patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
    (B, L, C) = (1, 100, 4)
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    (x_out, out_size) = patch_merge(x, input_size)
    assert (x_out.size() == (1, 4, 5))
    assert (out_size == (2, 2))
    assert (x_out.size(1) == (out_size[0] * out_size[1]))
    # Adaptive padding modes.
    for padding in ('same', 'corner'):
        in_c = 2
        out_c = 3
        B = 2
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 25, 3))
        assert (out_size == (5, 5))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 1, 3))
        assert (out_size == (1, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 2, 3))
        assert (out_size == (2, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 3, 3))
        assert (out_size == (1, 3))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
def test_multiheadattention():
    """batch-first and query-first MultiheadAttention layouts must agree
    once their weights are shared."""
    # Construction with a plain Dropout dropout_layer must work.
    MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='Dropout', drop_prob=0.0), batch_first=True)
    batch_dim = 2
    embed_dim = 5
    num_query = 100
    attn_batch_first = MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='DropPath', drop_prob=0.0), batch_first=True)
    attn_query_first = MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='DropPath', drop_prob=0.0), batch_first=False)
    # Copy parameters so both modules are numerically identical.
    param_dict = dict(attn_query_first.named_parameters())
    for (n, v) in attn_batch_first.named_parameters():
        param_dict[n].data = v.data
    # Self-attention: layouts only differ by a transpose.
    input_batch_first = torch.rand(batch_dim, num_query, embed_dim)
    input_query_first = input_batch_first.transpose(0, 1)
    assert torch.allclose(attn_query_first(input_query_first).sum(), attn_batch_first(input_batch_first).sum())
    # Cross-attention with an explicit key.
    key_batch_first = torch.rand(batch_dim, num_query, embed_dim)
    key_query_first = key_batch_first.transpose(0, 1)
    assert torch.allclose(attn_query_first(input_query_first, key_query_first).sum(), attn_batch_first(input_batch_first, key_batch_first).sum())
    # 'residual' (legacy kwarg) and 'identity' must add the same shortcut.
    identity = torch.ones_like(input_query_first)
    assert torch.allclose(attn_query_first(input_query_first, key_query_first, residual=identity).sum(), ((attn_batch_first(input_batch_first, key_batch_first).sum() + identity.sum()) - input_batch_first.sum()))
    assert torch.allclose(attn_query_first(input_query_first, key_query_first, identity=identity).sum(), ((attn_batch_first(input_batch_first, key_batch_first).sum() + identity.sum()) - input_batch_first.sum()))
    # Removed: a trailing dangling tuple expression that computed a sum but
    # asserted nothing (dead code).
def test_ffn():
    """FFN must be layout-agnostic and support identity/residual kwargs."""
    with pytest.raises(AssertionError):
        # num_fcs smaller than 2 is rejected.
        FFN(num_fcs=1)
    # Legacy 'add_residual' alias must still construct.
    FFN(dropout=0, add_residual=True)
    ffn = FFN(dropout=0, add_identity=True)
    # The FFN is point-wise, so (batch, seq) vs (seq, batch) is irrelevant.
    input_tensor = torch.rand(2, 20, 256)
    input_tensor_nbc = input_tensor.transpose(0, 1)
    assert torch.allclose(ffn(input_tensor).sum(), ffn(input_tensor_nbc).sum())
    residual = torch.rand_like(input_tensor)
    # Bug fix: these allclose results were previously discarded (no
    # assert), so the checks could never fail.
    assert torch.allclose(ffn(input_tensor, residual=residual).sum(), ((ffn(input_tensor).sum() + residual.sum()) - input_tensor.sum()))
    assert torch.allclose(ffn(input_tensor, identity=residual).sum(), ((ffn(input_tensor).sum() + residual.sum()) - input_tensor.sum()))
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='Cuda not available')
def test_basetransformerlayer_cuda():
    """Stack two copies of a BaseTransformerLayer on the GPU and verify
    that the feature shape is preserved end to end."""
    template = BaseTransformerLayer(
        operation_order=('self_attn', 'ffn'),
        batch_first=True,
        attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8))
    stack = ModuleList([copy.deepcopy(template) for _ in range(2)])
    stack.to('cuda')
    feat = torch.rand(2, 10, 256).cuda()
    # Feed the features through every layer in sequence.
    for layer in stack:
        feat = layer(feat)
    assert feat.shape == torch.Size([2, 10, 256])
@pytest.mark.parametrize('embed_dims', [False, 256])
def test_basetransformerlayer(embed_dims):
    # Attention config shared by both parametrized cases.
    attn_cfgs = (dict(type='MultiheadAttention', embed_dims=256, num_heads=8),)
    # ffn_cfgs may carry its own embed_dims; otherwise it is derived.
    if embed_dims:
        ffn_cfgs = dict(type='FFN', embed_dims=embed_dims, feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True))
    else:
        ffn_cfgs = dict(type='FFN', feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True))
    feedforward_channels = 2048
    ffn_dropout = 0.1
    operation_order = ('self_attn', 'norm', 'ffn', 'norm')
    baselayer = BaseTransformerLayer(attn_cfgs=attn_cfgs, ffn_cfgs=ffn_cfgs, feedforward_channels=feedforward_channels, ffn_dropout=ffn_dropout, operation_order=operation_order)
    # Default layout is query-first (batch_first=False).
    assert (baselayer.batch_first is False)
    assert (baselayer.ffns[0].feedforward_channels == feedforward_channels)
    attn_cfgs = (dict(type='MultiheadAttention', num_heads=8, embed_dims=256),)
    feedforward_channels = 2048
    ffn_dropout = 0.1
    operation_order = ('self_attn', 'norm', 'ffn', 'norm')
    baselayer = BaseTransformerLayer(attn_cfgs=attn_cfgs, feedforward_channels=feedforward_channels, ffn_dropout=ffn_dropout, operation_order=operation_order, batch_first=True)
    # batch_first must be propagated into the attention submodule.
    assert baselayer.attentions[0].batch_first
    # Smoke test: a forward pass with a batch-first tensor must run.
    in_tensor = torch.rand(2, 10, 256)
    baselayer(in_tensor)
def test_transformerlayersequence():
    # A single shared layer config is replicated num_layers times.
    squeue = TransformerLayerSequence(num_layers=6, transformerlayers=dict(type='BaseTransformerLayer', attn_cfgs=[dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), dict(type='MultiheadAttention', embed_dims=256, num_heads=4)], feedforward_channels=1024, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')))
    assert (len(squeue.layers) == 6)
    assert (squeue.pre_norm is False)
    # A list of layer configs must have exactly num_layers entries.
    with pytest.raises(AssertionError):
        TransformerLayerSequence(num_layers=6, transformerlayers=[dict(type='BaseTransformerLayer', attn_cfgs=[dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), dict(type='MultiheadAttention', embed_dims=256)], feedforward_channels=1024, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))])
def test_drop_path():
    """DropPath must be the identity when p == 0 or in eval mode."""
    identity_path = DropPath(drop_prob=0)
    sample = torch.rand(2, 3, 4, 5)
    # p == 0: the very same tensor object is returned.
    assert sample is identity_path(sample)
    stochastic_path = DropPath(drop_prob=0.1)
    stochastic_path.training = False
    sample = torch.rand(2, 3, 4, 5)
    # Eval mode: pass-through even with p > 0.
    assert sample is stochastic_path(sample)
    stochastic_path.training = True
    # Training mode with p > 0: a new tensor is produced.
    assert sample is not stochastic_path(sample)
def test_constant_init():
    """constant_init fills weights with the value and biases with zero."""
    conv = nn.Conv2d(3, 16, 3)
    constant_init(conv, 0.1)
    assert conv.weight.allclose(torch.full_like(conv.weight, 0.1))
    assert conv.bias.allclose(torch.zeros_like(conv.bias))
    # A module without a bias term must not raise.
    conv_nobias = nn.Conv2d(3, 16, 3, bias=False)
    constant_init(conv_nobias, 0.1)
    assert conv.weight.allclose(torch.full_like(conv.weight, 0.1))
def test_xavier_init():
    """xavier_init sets the bias and validates its distribution arg."""
    conv = nn.Conv2d(3, 16, 3)
    xavier_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    xavier_init(conv, distribution='uniform')
    # Only 'normal' and 'uniform' distributions are accepted.
    with pytest.raises(AssertionError):
        xavier_init(conv, distribution='student-t')
    # A module without a bias term must not raise.
    conv_nobias = nn.Conv2d(3, 16, 3, bias=False)
    xavier_init(conv_nobias)
def test_normal_init():
    """normal_init fills the bias with the given constant."""
    conv = nn.Conv2d(3, 16, 3)
    normal_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    # A module without a bias term must be handled gracefully.
    conv_nobias = nn.Conv2d(3, 16, 3, bias=False)
    normal_init(conv_nobias)
def test_trunc_normal_init():

    def _random_float(a, b):
        # Uniform sample in [a, b).
        return (((b - a) * random.random()) + a)

    def _is_trunc_normal(tensor, mean, std, a, b):
        # Kolmogorov-Smirnov test of the standardized samples against the
        # truncated-normal CDF; a large p-value means "not rejected".
        z_samples = ((tensor.view((- 1)) - mean) / std)
        z_samples = z_samples.tolist()
        a0 = ((a - mean) / std)
        b0 = ((b - mean) / std)
        p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
        return (p_value > 0.0001)
    conv_module = nn.Conv2d(3, 16, 3)
    # Randomize mean/std and pick truncation bounds around the mean.
    mean = _random_float((- 3), 3)
    std = _random_float(0.01, 1)
    a = _random_float((mean - (2 * std)), mean)
    b = _random_float(mean, (mean + (2 * std)))
    trunc_normal_init(conv_module, mean, std, a, b, bias=0.1)
    assert _is_trunc_normal(conv_module.weight, mean, std, a, b)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
    # A module without a bias term must not raise.
    conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
    trunc_normal_init(conv_module_no_bias)
def test_uniform_init():
    """uniform_init fills the bias with the given constant."""
    conv = nn.Conv2d(3, 16, 3)
    uniform_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    # A module without a bias term must be handled gracefully.
    conv_nobias = nn.Conv2d(3, 16, 3, bias=False)
    uniform_init(conv_nobias)
def test_kaiming_init():
    """kaiming_init validates the distribution and sets the bias."""
    conv = nn.Conv2d(3, 16, 3)
    kaiming_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    kaiming_init(conv, distribution='uniform')
    # Anything other than 'normal'/'uniform' is rejected.
    with pytest.raises(AssertionError):
        kaiming_init(conv, distribution='student-t')
    # A module without a bias term must not raise.
    conv_nobias = nn.Conv2d(3, 16, 3, bias=False)
    kaiming_init(conv_nobias)
def test_caffe_xavier_init():
    """Smoke test: caffe2_xavier_init runs on a standard Conv2d."""
    layer = nn.Conv2d(3, 16, 3)
    caffe2_xavier_init(layer)
def test_bias_init_with_prob():
    """bias_init_with_prob(p) must equal the logit -log((1-p)/p)."""
    conv_module = nn.Conv2d(3, 16, 3)
    prior_prob = 0.1
    # Consistency fix: use the named prior_prob instead of repeating the
    # literal 0.1, so the init and the expected value cannot drift apart.
    normal_init(conv_module, bias=bias_init_with_prob(prior_prob))
    bias = float(-np.log((1 - prior_prob) / prior_prob))
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
def test_constaninit():
    'test ConstantInit class.'
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    # Only the Conv2d is touched; the Linear keeps its random init.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0)))
    assert (not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0)))
    # bias_prob converts a probability into the actual bias value.
    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # '_ConvNd' matches every convolution flavour (Conv1d/2d/3d, ...).
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4.0, bias=5.0, layer='_ConvNd')
    func(model)
    assert torch.all((model[0].weight == 4.0))
    assert torch.all((model[2].weight == 4.0))
    assert torch.all((model[0].bias == 5.0))
    assert torch.all((model[2].bias == 5.0))
    # Invalid argument types must raise TypeError at construction time.
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias='1')
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias_prob='1')
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, layer=1)
def test_xavierinit():
    """test XavierInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # Bug fix: each bias is compared against a constant tensor of its OWN
    # shape; the originals cross-referenced the other module's bias and
    # only passed through broadcasting.
    assert model[0].bias.allclose(torch.full_like(model[0].bias, 0.1))
    assert not model[2].bias.allclose(torch.full_like(model[2].bias, 0.1))
    # Zero everything first so the Xavier pass is observable.
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.0))
    res = bias_init_with_prob(0.01)
    func(model)
    assert not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # Same checks through the generic '_ConvNd' layer key.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4.0, bias=5.0, layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.0)
    assert torch.all(model[2].weight == 4.0)
    assert torch.all(model[0].bias == 5.0)
    assert torch.all(model[2].bias == 5.0)
    func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert not torch.all(model[0].weight == 4.0)
    assert not torch.all(model[2].weight == 4.0)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
    # Invalid argument types must raise TypeError at construction time.
    with pytest.raises(TypeError):
        func = XavierInit(bias='0.1', layer='Conv2d')
    with pytest.raises(TypeError):
        func = XavierInit(bias=0.1, layer=1)
def test_normalinit():
    """test Normalinit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # std=1e-05 makes every element numerically equal to the mean.
    init_fn = NormalInit(mean=100, std=1e-05, bias=200, layer=['Conv2d', 'Linear'])
    init_fn(model)
    for idx in (0, 2):
        assert model[idx].weight.allclose(torch.tensor(100.0))
        assert model[idx].bias.allclose(torch.tensor(200.0))
    # bias_prob derives the bias value from a probability.
    init_fn = NormalInit(mean=300, std=1e-05, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    expected_bias = bias_init_with_prob(0.01)
    init_fn(model)
    for idx in (0, 2):
        assert model[idx].weight.allclose(torch.tensor(300.0))
        assert model[idx].bias.allclose(torch.tensor(expected_bias))
    # '_ConvNd' matches both the Conv2d and the Conv1d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = NormalInit(mean=300, std=1e-05, bias_prob=0.01, layer='_ConvNd')
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(300.0))
    assert model[2].weight.allclose(torch.tensor(300.0))
    assert torch.all(model[0].bias == expected_bias)
    assert torch.all(model[2].bias == expected_bias)
def test_truncnormalinit():
    """test TruncNormalInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # std=1e-05 with bounds containing the mean: everything equals mean.
    init_fn = TruncNormalInit(mean=100, std=1e-05, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
    init_fn(model)
    for idx in (0, 2):
        assert model[idx].weight.allclose(torch.tensor(100.0))
        assert model[idx].bias.allclose(torch.tensor(200.0))
    # bias_prob derives the bias value from a probability.
    init_fn = TruncNormalInit(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    expected_bias = bias_init_with_prob(0.01)
    init_fn(model)
    for idx in (0, 2):
        assert model[idx].weight.allclose(torch.tensor(300.0))
        assert model[idx].bias.allclose(torch.tensor(expected_bias))
    # '_ConvNd' matches both the Conv2d and the Conv1d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = TruncNormalInit(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(300.0))
    assert model[2].weight.allclose(torch.tensor(300.0))
    assert torch.all(model[0].bias == expected_bias)
    assert torch.all(model[2].bias == expected_bias)
def test_uniforminit():
    """test UniformInit class."""
    # (Fixed a stray quote that was embedded in the original docstring.)
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # a == b collapses the uniform range to a single deterministic value.
    func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0))
    func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 100.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 100.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.0))
    # '_ConvNd' matches both the Conv2d and the Conv1d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
    res = bias_init_with_prob(0.01)
    func(model)
    assert torch.all(model[0].weight == 100.0)
    assert torch.all(model[2].weight == 100.0)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_kaiminginit():
    'test KaimingInit class.'
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = KaimingInit(bias=0.1, layer='Conv2d')
    func(model)
    # Only the Conv2d bias is set; the Linear layer stays untouched.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert (not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1)))
    func = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    # Zero everything first so the Kaiming pass is observable.
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.0))
    func(model)
    assert (not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0)))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0)))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.0))
    # Same checks through the generic '_ConvNd' layer key.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = KaimingInit(bias=0.1, layer='_ConvNd')
    func(model)
    assert torch.all((model[0].bias == 0.1))
    assert torch.all((model[2].bias == 0.1))
    func = KaimingInit(a=100, bias=10, layer='_ConvNd')
    constant_func = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.0))
    func(model)
    assert (not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0)))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0)))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.0))
def test_caffe2xavierinit():
    """test Caffe2XavierInit."""
    net = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = Caffe2XavierInit(bias=0.1, layer='Conv2d')
    init_fn(net)
    # Only the Conv2d layer is selected by the 'layer' filter.
    assert torch.equal(net[0].bias, torch.full(net[0].bias.shape, 0.1))
    assert not torch.equal(net[2].bias, torch.full(net[2].bias.shape, 0.1))
class FooModule(nn.Module):
    """Small fixture network: one Linear plus two Conv2d submodules."""

    def __init__(self):
        super().__init__()
        # The attribute names are referenced by the init/override tests.
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
def test_pretrainedinit():
    """test PretrainedInit class."""
    import os.path as osp
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    modelB = FooModule()
    modelC = nn.Linear(1, 2)
    # Bug fix: the checkpoint used to be written to the CWD because the
    # TemporaryDirectory handle was discarded, leaking 'modelA.pth' after
    # the test. Save it inside the temporary directory instead so it is
    # cleaned up automatically.
    with TemporaryDirectory() as tmpdir:
        checkpoint = osp.join(tmpdir, 'modelA.pth')
        funcB = PretrainedInit(checkpoint=checkpoint)
        # The prefix restricts loading to the matching sub-state-dict.
        funcC = PretrainedInit(checkpoint=checkpoint, prefix='linear.')
        torch.save(modelA.state_dict(), checkpoint)
        funcB(modelB)
        assert torch.equal(modelB.linear.weight, torch.full(modelB.linear.weight.shape, 1.0))
        assert torch.equal(modelB.linear.bias, torch.full(modelB.linear.bias.shape, 2.0))
        assert torch.equal(modelB.conv2d.weight, torch.full(modelB.conv2d.weight.shape, 1.0))
        assert torch.equal(modelB.conv2d.bias, torch.full(modelB.conv2d.bias.shape, 2.0))
        assert torch.equal(modelB.conv2d_2.weight, torch.full(modelB.conv2d_2.weight.shape, 1.0))
        assert torch.equal(modelB.conv2d_2.bias, torch.full(modelB.conv2d_2.bias.shape, 2.0))
        funcC(modelC)
        assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.0))
        assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.0))
def test_initialize():
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    foonet = FooModule()
    # One cfg applied to several layer types; initialize() must not mutate
    # the cfg dict (checked by the equality assert afterwards).
    init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0))
    assert (init_cfg == dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2))
    # A list of cfgs: each entry targets its own layer type.
    init_cfg = [dict(type='Constant', layer='Conv2d', val=1, bias=2), dict(type='Constant', layer='Linear', val=3, bias=4)]
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.0))
    assert (init_cfg == [dict(type='Constant', layer='Conv2d', val=1, bias=2), dict(type='Constant', layer='Linear', val=3, bias=4)])
    # 'override' re-initializes the named submodule with its own cfg.
    init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    initialize(foonet, init_cfg)
    assert torch.equal(foonet.linear.weight, torch.full(foonet.linear.weight.shape, 1.0))
    assert torch.equal(foonet.linear.bias, torch.full(foonet.linear.bias.shape, 2.0))
    assert torch.equal(foonet.conv2d.weight, torch.full(foonet.conv2d.weight.shape, 1.0))
    assert torch.equal(foonet.conv2d.bias, torch.full(foonet.conv2d.bias.shape, 2.0))
    assert torch.equal(foonet.conv2d_2.weight, torch.full(foonet.conv2d_2.weight.shape, 3.0))
    assert torch.equal(foonet.conv2d_2.bias, torch.full(foonet.conv2d_2.bias.shape, 4.0))
    assert (init_cfg == dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=dict(type='Constant', name='conv2d_2', val=3, bias=4)))
    # Without 'layer', only the override target is initialized.
    init_cfg = dict(type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    initialize(foonet, init_cfg)
    assert (not torch.equal(foonet.linear.weight, torch.full(foonet.linear.weight.shape, 5.0)))
    assert (not torch.equal(foonet.linear.bias, torch.full(foonet.linear.bias.shape, 6.0)))
    assert (not torch.equal(foonet.conv2d.weight, torch.full(foonet.conv2d.weight.shape, 5.0)))
    assert (not torch.equal(foonet.conv2d.bias, torch.full(foonet.conv2d.bias.shape, 6.0)))
    assert torch.equal(foonet.conv2d_2.weight, torch.full(foonet.conv2d_2.weight.shape, 5.0))
    assert torch.equal(foonet.conv2d_2.bias, torch.full(foonet.conv2d_2.bias.shape, 6.0))
    assert (init_cfg == dict(type='Constant', val=5, bias=6, override=dict(name='conv2d_2')))
    # Pretrained checkpoint plus a constant override for one submodule.
    init_cfg = dict(type='Pretrained', checkpoint='modelA.pth', override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    # NOTE(review): TemporaryDirectory() is entered but its path is never
    # used, so 'modelA.pth' is written to the CWD and not cleaned up —
    # confirm and consider saving inside the temp dir instead.
    with TemporaryDirectory():
        torch.save(modelA.state_dict(), 'modelA.pth')
        initialize(foonet, init_cfg)
        assert torch.equal(foonet.linear.weight, torch.full(foonet.linear.weight.shape, 1.0))
        assert torch.equal(foonet.linear.bias, torch.full(foonet.linear.bias.shape, 2.0))
        assert torch.equal(foonet.conv2d.weight, torch.full(foonet.conv2d.weight.shape, 1.0))
        assert torch.equal(foonet.conv2d.bias, torch.full(foonet.conv2d.bias.shape, 2.0))
        assert torch.equal(foonet.conv2d_2.weight, torch.full(foonet.conv2d_2.weight.shape, 3.0))
        assert torch.equal(foonet.conv2d_2.bias, torch.full(foonet.conv2d_2.bias.shape, 4.0))
        assert (init_cfg == dict(type='Pretrained', checkpoint='modelA.pth', override=dict(type='Constant', name='conv2d_2', val=3, bias=4)))
    # Invalid cfg types and unknown override names must raise.
    with pytest.raises(TypeError):
        init_cfg = 'init_cfg'
        initialize(foonet, init_cfg)
    with pytest.raises(TypeError):
        init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override='conv')
        initialize(foonet, init_cfg)
    with pytest.raises(RuntimeError):
        init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=dict(type='Constant', name='conv2d_3', val=3, bias=4))
        initialize(foonet, init_cfg)
    with pytest.raises(RuntimeError):
        init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=[dict(type='Constant', name='conv2d', val=3, bias=4), dict(type='Constant', name='conv2d_3', val=5, bias=6)])
        initialize(foonet, init_cfg)
    with pytest.raises(ValueError):
        init_cfg = dict(type='Constant', val=1, bias=2, override=dict(name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
    with pytest.raises(ValueError):
        init_cfg = dict(type='Constant', val=1, bias=2, override=dict(type='Constant', val=3, bias=4))
        initialize(foonet, init_cfg)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """The Conv2d wrapper must handle empty (batch == 0) inputs.

    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv2d
    """
    # Zero-batch input through the wrapper.
    x_empty = torch.randn(0, in_channel, in_h, in_w)
    torch.manual_seed(0)
    wrapper = Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    wrapper_out = wrapper(x_empty)
    # Reference torch module seeded identically => identical weights.
    x_normal = torch.randn(3, in_channel, in_h, in_w).requires_grad_(True)
    torch.manual_seed(0)
    ref = nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    ref_out = ref(x_normal)
    assert (wrapper_out.shape[0] == 0)
    assert (wrapper_out.shape[1:] == ref_out.shape[1:])
    # Backward through the empty forward must still populate gradients.
    wrapper_out.sum().backward()
    assert (wrapper.weight.grad is not None)
    assert (wrapper.weight.grad.shape == wrapper.weight.shape)
    # On normal input the wrapper must match the reference exactly.
    assert torch.equal(wrapper(x_normal), ref_out)
    # Eval mode with an empty input must also run without error.
    x_empty = torch.randn(0, in_channel, in_h, in_w)
    wrapper = Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    wrapper.eval()
    wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """The Conv3d wrapper must handle empty (batch == 0) inputs.

    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv3d
    """
    # Zero-batch input through the wrapper.
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
    torch.manual_seed(0)
    wrapper = Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    wrapper_out = wrapper(x_empty)
    # Reference torch module seeded identically => identical weights.
    x_normal = torch.randn(3, in_channel, in_t, in_h, in_w).requires_grad_(True)
    torch.manual_seed(0)
    ref = nn.Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    ref_out = ref(x_normal)
    assert (wrapper_out.shape[0] == 0)
    assert (wrapper_out.shape[1:] == ref_out.shape[1:])
    # Backward through the empty forward must still populate gradients.
    wrapper_out.sum().backward()
    assert (wrapper.weight.grad is not None)
    assert (wrapper.weight.grad.shape == wrapper.weight.shape)
    # On normal input the wrapper must match the reference exactly.
    assert torch.equal(wrapper(x_normal), ref_out)
    # Eval mode with an empty input must also run without error.
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
    wrapper = Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    wrapper.eval()
    wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """ConvTranspose2d wrapper: empty-batch support and parity with torch."""
    empty_input = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
    # parrots does not support a non-zero output_padding here.
    if torch.__version__ == 'parrots':
        output_padding = 0
    else:
        output_padding = min(stride, dilation) - 1
    torch.manual_seed(0)
    wrapper = ConvTranspose2d(
        in_channel,
        out_channel,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding)
    empty_out = wrapper(empty_input)

    # Reference module is built after re-seeding so its weights match the
    # wrapper's exactly.
    normal_input = torch.randn(3, in_channel, in_h, in_w)
    torch.manual_seed(0)
    reference = nn.ConvTranspose2d(
        in_channel,
        out_channel,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding)
    reference_out = reference(normal_input)

    # Empty output keeps batch size 0 but has the reference feature shape.
    assert empty_out.shape[0] == 0
    assert empty_out.shape[1:] == reference_out.shape[1:]

    # Backward through the empty output must still populate weight grads.
    empty_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    # With identical weights the wrapper must match torch exactly.
    assert torch.equal(wrapper(normal_input), reference_out)

    # eval() mode must accept an empty batch as well.
    empty_input = torch.randn(0, in_channel, in_h, in_w)
    wrapper = ConvTranspose2d(
        in_channel,
        out_channel,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding)
    wrapper.eval()
    wrapper(empty_input)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """ConvTranspose3d wrapper: empty-batch support and parity with torch."""
    empty_input = torch.randn(
        0, in_channel, in_t, in_h, in_w, requires_grad=True)
    output_padding = min(stride, dilation) - 1
    torch.manual_seed(0)
    wrapper = ConvTranspose3d(
        in_channel,
        out_channel,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding)
    empty_out = wrapper(empty_input)

    # Reference module is built after re-seeding so its weights match the
    # wrapper's exactly.
    normal_input = torch.randn(3, in_channel, in_t, in_h, in_w)
    torch.manual_seed(0)
    reference = nn.ConvTranspose3d(
        in_channel,
        out_channel,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding)
    reference_out = reference(normal_input)

    # Empty output keeps batch size 0 but has the reference feature shape.
    assert empty_out.shape[0] == 0
    assert empty_out.shape[1:] == reference_out.shape[1:]

    # Backward through the empty output must still populate weight grads.
    empty_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    # With identical weights the wrapper must match torch exactly.
    assert torch.equal(wrapper(normal_input), reference_out)

    # eval() mode must accept an empty batch as well.
    empty_input = torch.randn(0, in_channel, in_t, in_h, in_w)
    wrapper = ConvTranspose3d(
        in_channel,
        out_channel,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding)
    wrapper.eval()
    wrapper(empty_input)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_max_pool_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """MaxPool2d wrapper: empty-batch support and parity with torch."""
    empty_input = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
    wrapper = MaxPool2d(
        kernel_size, stride=stride, padding=padding, dilation=dilation)
    empty_out = wrapper(empty_input)

    # Pooling has no parameters, so no seeding is needed for parity.
    normal_input = torch.randn(3, in_channel, in_h, in_w)
    reference = nn.MaxPool2d(
        kernel_size, stride=stride, padding=padding, dilation=dilation)
    reference_out = reference(normal_input)

    # Empty output keeps batch size 0 but has the reference feature shape.
    assert empty_out.shape[0] == 0
    assert empty_out.shape[1:] == reference_out.shape[1:]
    assert torch.equal(wrapper(normal_input), reference_out)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
@pytest.mark.skipif(((torch.__version__ == 'parrots') and (not torch.cuda.is_available())), reason='parrots requires CUDA support')
def test_max_pool_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """MaxPool3d wrapper: empty-batch support and parity with torch."""
    empty_input = torch.randn(
        0, in_channel, in_t, in_h, in_w, requires_grad=True)
    wrapper = MaxPool3d(
        kernel_size, stride=stride, padding=padding, dilation=dilation)
    # parrots runs 3-D pooling on GPU only.
    if torch.__version__ == 'parrots':
        empty_input = empty_input.cuda()
    empty_out = wrapper(empty_input)

    # Pooling has no parameters, so no seeding is needed for parity.
    normal_input = torch.randn(3, in_channel, in_t, in_h, in_w)
    reference = nn.MaxPool3d(
        kernel_size, stride=stride, padding=padding, dilation=dilation)
    if torch.__version__ == 'parrots':
        normal_input = normal_input.cuda()
    reference_out = reference(normal_input)

    # Empty output keeps batch size 0 but has the reference feature shape.
    assert empty_out.shape[0] == 0
    assert empty_out.shape[1:] == reference_out.shape[1:]
    assert torch.equal(wrapper(normal_input), reference_out)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_feature,out_feature', [(10, 10, 1, 1), (20, 20, 3, 3)])
def test_linear(in_w, in_h, in_feature, out_feature):
    """Linear wrapper: empty-batch support and parity with ``nn.Linear``."""
    empty_input = torch.randn(0, in_feature, requires_grad=True)
    torch.manual_seed(0)
    wrapper = Linear(in_feature, out_feature)
    empty_out = wrapper(empty_input)

    # Reference module is built after re-seeding so its weights match the
    # wrapper's exactly.
    normal_input = torch.randn(3, in_feature)
    torch.manual_seed(0)
    reference = nn.Linear(in_feature, out_feature)
    reference_out = reference(normal_input)

    # Empty output keeps batch size 0 but has the reference feature shape.
    assert empty_out.shape[0] == 0
    assert empty_out.shape[1:] == reference_out.shape[1:]

    # Backward through the empty output must still populate weight grads.
    empty_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    # With identical weights the wrapper must reproduce nn.Linear exactly.
    assert torch.equal(wrapper(normal_input), reference_out)

    # eval() mode must accept an empty batch as well.
    empty_input = torch.randn(0, in_feature)
    wrapper = Linear(in_feature, out_feature)
    wrapper.eval()
    wrapper(empty_input)
@patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10))
def test_nn_op_forward_called():
    """On torch >= 1.5 the wrappers must delegate every call, empty batch or
    not, to the underlying ``torch.nn`` module's ``forward``."""
    # (wrapper name, empty-batch shape, normal-batch shape)
    cases = [(name, (0, 3, 10, 10), (1, 3, 10, 10))
             for name in ('Conv2d', 'ConvTranspose2d', 'MaxPool2d')]
    cases += [(name, (0, 3, 10, 10, 10), (1, 3, 10, 10, 10))
              for name in ('Conv3d', 'ConvTranspose3d', 'MaxPool3d')]
    for name, empty_shape, normal_shape in cases:
        with patch(f'torch.nn.{name}.forward') as mocked_forward:
            for shape in (empty_shape, normal_shape):
                batch = torch.randn(*shape)
                # eval(name) resolves the mmcv wrapper class of that name.
                wrapper = eval(name)(3, 2, 1)
                wrapper(batch)
                mocked_forward.assert_called_with(batch)

    # Linear has a different constructor signature, so handle it separately.
    with patch('torch.nn.Linear.forward') as mocked_forward:
        for shape in ((0, 3), (1, 3)):
            batch = torch.randn(*shape)
            wrapper = Linear(3, 3)
            wrapper(batch)
            mocked_forward.assert_called_with(batch)
@contextmanager
def build_temporary_directory():
    """Build a temporary directory containing many files to test
    ``FileClient.list_dir_or_file``.

    Layout::

        .
        |-- dir1
        |   `-- text3.txt
        |-- dir2
        |   |-- dir3
        |   |   `-- text4.txt
        |   `-- img.jpg
        |-- text1.txt
        `-- text2.txt

    Yields:
        str: Path of the temporary directory; it is removed on exit.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        # write_text/write_bytes open, write and close the file in one call;
        # the previous ``open(...).write(...)`` left the handles unclosed.
        root = Path(tmp_dir)
        (root / 'text1.txt').write_text('text1')
        (root / 'text2.txt').write_text('text2')
        dir1 = root / 'dir1'
        dir1.mkdir()
        (dir1 / 'text3.txt').write_text('text3')
        dir2 = root / 'dir2'
        dir2.mkdir()
        (dir2 / 'img.jpg').write_bytes(b'img')
        dir3 = dir2 / 'dir3'
        dir3.mkdir()
        (dir3 / 'text4.txt').write_text('text4')
        yield tmp_dir
@contextmanager
def delete_and_reset_method(obj, method):
    """Temporarily delete ``method`` from ``type(obj)``.

    Used to simulate a backend that lacks an optional method. The saved
    method is restored in a ``finally`` clause so the class is repaired even
    if the ``with`` body raises (the original version skipped restoration on
    exceptions, leaking the mutation into later tests).

    Args:
        obj: Any instance; the method is removed from its class.
        method (str): Name of the method to remove.
    """
    saved_method = deepcopy(getattr(type(obj), method))
    try:
        delattr(type(obj), method)
        yield
    finally:
        setattr(type(obj), method, saved_method)
class MockS3Client:
    """Minimal stand-in for an S3 client that serves objects from local disk."""

    def __init__(self, enable_mc=True):
        # Mirrors the real client's memcached toggle; unused by this mock.
        self.enable_mc = enable_mc

    def Get(self, filepath):
        """Return the raw bytes stored at ``filepath``."""
        with open(filepath, 'rb') as handle:
            return handle.read()
class MockPetrelClient:
    """Minimal stand-in for a Petrel client backed by the local filesystem.

    Only ``Get`` and ``list`` have real behavior; the remaining methods exist
    solely so the mock exposes the same surface as the real client.
    """

    def __init__(self, enable_mc=True, enable_multi_cluster=False):
        # Flags mirror the real client's constructor; stored but unused.
        self.enable_mc = enable_mc
        self.enable_multi_cluster = enable_multi_cluster

    def Get(self, filepath):
        """Return the raw bytes stored at ``filepath``."""
        with open(filepath, 'rb') as handle:
            return handle.read()

    def put(self):
        pass

    def delete(self):
        pass

    def contains(self):
        pass

    def isdir(self):
        pass

    def list(self, dir_path):
        """Yield file names (hidden files skipped) and directory names with a
        trailing ``/`` for the entries of ``dir_path``."""
        for item in os.scandir(dir_path):
            if (not item.name.startswith('.')) and item.is_file():
                yield item.name
            elif osp.isdir(item.path):
                yield item.name + '/'
class MockMemcachedClient:
    """Minimal stand-in for a memcached client that reads from local disk."""

    def __init__(self, server_list_cfg, client_cfg):
        # Config arguments mirror the real client's signature; unused here.
        pass

    def Get(self, filepath, buffer):
        """Read ``filepath`` and store its raw bytes on ``buffer.content``."""
        with open(filepath, 'rb') as handle:
            buffer.content = handle.read()