code
stringlengths
17
6.64M
def is_tuple_of(seq, expected_type):
    """Check whether ``seq`` is a tuple whose items are all ``expected_type``.

    A thin convenience wrapper around :func:`is_seq_of` with ``seq_type``
    fixed to :class:`tuple`.

    Args:
        seq: The candidate sequence.
        expected_type: Expected type of every element.

    Returns:
        bool: True if ``seq`` is a tuple of ``expected_type`` items.
    """
    return is_seq_of(seq, expected_type, seq_type=tuple)
def slice_list(in_list, lens):
    """Slice a list into several sub lists by a list of given lengths.

    Args:
        in_list (list): The list to be sliced.
        lens (int or list): The expected length of each output sub list.
            An int means equal-sized chunks.

    Returns:
        list: A list of sliced sub lists.

    Raises:
        TypeError: If ``lens`` is neither an int nor a list.
        ValueError: If ``sum(lens)`` does not equal ``len(in_list)``.
    """
    if isinstance(lens, int):
        # equal chunks: the list length must divide evenly
        assert len(in_list) % lens == 0
        lens = [lens] * int(len(in_list) / lens)
    if not isinstance(lens, list):
        raise TypeError('"indices" must be an integer or a list of integers')
    elif sum(lens) != len(in_list):
        raise ValueError(
            'sum of lens and list length does not match: {} != {}'.format(
                sum(lens), len(in_list)))
    sliced = []
    cursor = 0
    for step in lens:
        sliced.append(in_list[cursor:cursor + step])
        cursor += step
    return sliced
def concat_list(in_list):
    """Flatten a list of lists into a single list.

    Args:
        in_list (list): The list of lists to be merged.

    Returns:
        list: The concatenated flat list.
    """
    return list(itertools.chain.from_iterable(in_list))
def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not found, please install them first.'):
    """A decorator factory that verifies prerequisites before a call.

    Args:
        prerequisites (str or list[str]): Prerequisite(s) to be checked.
        checker (callable): Returns True when a single prerequisite is met.
        msg_tmpl (str): Message template with two ``{}`` slots: the missing
            items and the decorated function's name.

    Returns:
        callable: A decorator enforcing the prerequisites.
    """

    def wrap(func):

        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            # normalize a single name into a list
            wanted = [prerequisites] if isinstance(prerequisites, str) else prerequisites
            missing = [item for item in wanted if not checker(item)]
            if missing:
                print(msg_tmpl.format(', '.join(missing), func.__name__))
                raise RuntimeError('Prerequisites not meet.')
            return func(*args, **kwargs)
        return wrapped_func
    return wrap
def _check_py_package(package): try: import_module(package) except ImportError: return False else: return True
def _check_executable(cmd): if (subprocess.call('which {}'.format(cmd), shell=True) != 0): return False else: return True
def requires_package(prerequisites):
    """A decorator checking that some python packages are installed.

    Example:
        >>> @requires_package('numpy')
        >>> func(arg1, args):
        >>>     return numpy.zeros(1)
        array([0.])
        >>> @requires_package(['numpy', 'non_package'])
        >>> func(arg1, args):
        >>>     return numpy.zeros(1)
        ImportError

    Args:
        prerequisites (str or list[str]): Package name(s) to require.

    Returns:
        callable: A decorator enforcing the package requirement.
    """
    return check_prerequisites(prerequisites, checker=_check_py_package)
def requires_executable(prerequisites):
    """A decorator checking that some executables are available on PATH.

    Example:
        >>> @requires_executable('ffmpeg')
        >>> func(arg1, args):
        >>>     print(1)
        1

    Args:
        prerequisites (str or list[str]): Executable name(s) to require.

    Returns:
        callable: A decorator enforcing the executable requirement.
    """
    return check_prerequisites(prerequisites, checker=_check_executable)
def is_filepath(x):
    """Return True if ``x`` is a string path or a :class:`pathlib.Path`."""
    return is_str(x) or isinstance(x, Path)
def fopen(filepath, *args, **kwargs):
    """Open a file given either a string path or a :class:`pathlib.Path`.

    Extra positional/keyword arguments are forwarded to :func:`open` /
    :meth:`pathlib.Path.open` (mode, encoding, ...).

    Args:
        filepath (str or Path): The file to open.

    Returns:
        file object: The opened file handle.

    Raises:
        ValueError: If ``filepath`` is neither a string nor a Path.
            (The original silently returned ``None`` for unsupported
            types, which hid caller bugs.)
    """
    if is_str(filepath):
        return open(filepath, *args, **kwargs)
    if isinstance(filepath, Path):
        return filepath.open(*args, **kwargs)
    raise ValueError('`filepath` should be a string or a Path object, but got {}'.format(type(filepath)))
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
    """Raise :exc:`FileNotFoundError` unless ``filename`` is an existing file.

    Args:
        filename (str): Path to check.
        msg_tmpl (str): Error message template with one ``{}`` slot for
            the filename.
    """
    if osp.isfile(filename):
        return
    raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=511):
    """Create a directory (with parents) if it does not already exist.

    Args:
        dir_name (str): Directory path; ``~`` is expanded. An empty string
            is a no-op.
        mode (int): Permission bits passed to ``os.makedirs``; the default
            511 is decimal for 0o777.
    """
    if dir_name == '':
        return
    expanded = osp.expanduser(dir_name)
    os.makedirs(expanded, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
    """Create a symlink from ``dst`` to ``src``.

    Args:
        src (str): Link target.
        dst (str): Link location.
        overwrite (bool): If True, an existing file/link at ``dst`` is
            removed first.
    """
    if overwrite and os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False):
    """Scan a directory for files of interest.

    Args:
        dir_path (str | obj:`Path`): Path of the directory.
        suffix (str | tuple(str), optional): Only yield files whose
            relative path ends with this suffix. Default: None (all files).
        recursive (bool, optional): If True, also descend into
            subdirectories. Default: False.

    Returns:
        generator: Yields file paths relative to ``dir_path``.

    Raises:
        TypeError: If ``dir_path`` or ``suffix`` has a wrong type.
    """
    if not isinstance(dir_path, (str, Path)):
        raise TypeError('"dir_path" must be a string or Path object')
    dir_path = str(dir_path)
    if suffix is not None and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')
    root = dir_path

    def _walk(current, suffix, recursive):
        for entry in os.scandir(current):
            # non-hidden regular files are candidates for yielding;
            # everything else (including hidden entries) falls through to
            # the recursion branch, matching the original behavior
            if not entry.name.startswith('.') and entry.is_file():
                rel_path = osp.relpath(entry.path, root)
                if suffix is None or rel_path.endswith(suffix):
                    yield rel_path
            elif recursive:
                yield from _walk(entry.path, suffix=suffix, recursive=recursive)
            else:
                continue
    return _walk(dir_path, suffix=suffix, recursive=recursive)
def find_vcs_root(path, markers=('.git',)):
    """Find the closest ancestor directory (or ``path`` itself) containing
    one of the given marker entries.

    Args:
        path (str): Path of a directory or file.
        markers (list[str], optional): File or directory names that mark
            a VCS root.

    Returns:
        str or None: The directory containing one of the markers, or None
        if the filesystem root is reached without a match.
    """
    if osp.isfile(path):
        path = osp.dirname(path)
    cur = osp.abspath(osp.expanduser(path))
    prev = None
    # walk upwards; at the filesystem root osp.split leaves cur unchanged
    while cur != prev:
        if any(osp.exists(osp.join(cur, marker)) for marker in markers):
            return cur
        prev, cur = cur, osp.split(cur)[0]
    return None
class ProgressBar(object):
    """A progress bar that prints task progress to a stream.

    With ``task_num > 0`` a ``[>>>   ] k/n`` style bar with ETA is shown;
    with ``task_num == 0`` (total unknown) only a running count is printed.
    """

    def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout):
        # task_num: total number of tasks (0 means unknown)
        self.task_num = task_num
        # bar_width: requested width of the bar in characters (may be
        # shrunk in update() to fit the terminal)
        self.bar_width = bar_width
        # completed: number of update() calls so far
        self.completed = 0
        self.file = file
        if start:
            self.start()

    @property
    def terminal_width(self):
        # re-query the terminal size on every access so resizes are honored
        (width, _) = get_terminal_size()
        return width

    def start(self):
        """Print the initial (empty) bar and (re)start the timer."""
        if (self.task_num > 0):
            self.file.write('[{}] 0/{}, elapsed: 0s, ETA:'.format((' ' * self.bar_width), self.task_num))
        else:
            self.file.write('completed: 0, elapsed: 0s')
        self.file.flush()
        self.timer = Timer()

    def update(self):
        """Mark one task as completed and redraw the progress line."""
        self.completed += 1
        elapsed = self.timer.since_start()
        # guard against division by zero on the very first, instant update
        if (elapsed > 0):
            fps = (self.completed / elapsed)
        else:
            fps = float('inf')
        if (self.task_num > 0):
            percentage = (self.completed / float(self.task_num))
            # ETA = remaining fraction scaled by elapsed time, rounded
            eta = int((((elapsed * (1 - percentage)) / percentage) + 0.5))
            msg = '\r[{{}}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s'.format(self.completed, self.task_num, fps, int((elapsed + 0.5)), eta)
            # shrink the bar so the whole line fits the terminal, but keep
            # it at least 2 characters wide
            bar_width = min(self.bar_width, (int((self.terminal_width - len(msg))) + 2), int((self.terminal_width * 0.6)))
            bar_width = max(2, bar_width)
            mark_width = int((bar_width * percentage))
            bar_chars = (('>' * mark_width) + (' ' * (bar_width - mark_width)))
            self.file.write(msg.format(bar_chars))
        else:
            self.file.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(self.completed, int((elapsed + 0.5)), fps))
        self.file.flush()
def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs):
    """Apply ``func`` to every task sequentially while showing a progress bar.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or a
            ``(tasks, total_num)`` pair.
        bar_width (int): Width of the progress bar.
        file: Stream the bar is written to.

    Returns:
        list: The task results, in input order.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num, tasks = tasks[1], tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    bar = ProgressBar(task_num, bar_width, file=file)
    results = []
    for item in tasks:
        results.append(func(item, **kwargs))
        bar.update()
    bar.file.write('\n')
    return results
def init_pool(process_num, initializer=None, initargs=None):
    """Create a :class:`multiprocessing.Pool`, validating ``initargs``.

    Args:
        process_num (int): Number of worker processes.
        initializer (callable, optional): Worker initializer.
        initargs (tuple, optional): Arguments for the initializer.

    Returns:
        multiprocessing.Pool: The created pool.

    Raises:
        TypeError: If ``initargs`` is given but is not a tuple.
    """
    if initializer is None:
        return Pool(process_num)
    if initargs is None:
        return Pool(process_num, initializer)
    if not isinstance(initargs, tuple):
        raise TypeError('"initargs" must be a tuple')
    return Pool(process_num, initializer, initargs)
def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True, file=sys.stdout):
    """Track the progress of parallel task execution with a progress bar.

    The built-in :mod:`multiprocessing` module is used for process pools and
    tasks are done with :func:`Pool.imap` or :func:`Pool.imap_unordered`.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        nproc (int): Process (worker) number.
        initializer (None or callable): Refer to
            :class:`multiprocessing.Pool` for details.
        initargs (None or tuple): Refer to :class:`multiprocessing.Pool`
            for details.
        chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
        bar_width (int): Width of progress bar.
        skip_first (bool): Whether to skip the first ``nproc * chunksize``
            results when estimating fps, since the initialization step may
            take longer.
        keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
            :func:`Pool.imap_unordered` is used.

    Returns:
        list: The task results.
    """
    if isinstance(tasks, tuple):
        assert (len(tasks) == 2)
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    pool = init_pool(nproc, initializer, initargs)
    # with skip_first, the bar is started later (after the first batch) and
    # the first nproc*chunksize results are excluded from the task count
    start = (not skip_first)
    task_num -= ((nproc * chunksize) * int(skip_first))
    prog_bar = ProgressBar(task_num, bar_width, start, file=file)
    results = []
    if keep_order:
        gen = pool.imap(func, tasks, chunksize)
    else:
        gen = pool.imap_unordered(func, tasks, chunksize)
    for result in gen:
        results.append(result)
        if skip_first:
            # swallow the first batch of results without updating the bar;
            # restart the timer exactly when the batch completes
            if (len(results) < (nproc * chunksize)):
                continue
            elif (len(results) == (nproc * chunksize)):
                prog_bar.start()
                continue
        prog_bar.update()
    prog_bar.file.write('\n')
    pool.close()
    pool.join()
    return results
def track_iter_progress(tasks, bar_width=50, file=sys.stdout, **kwargs):
    """Yield tasks one by one while updating a progress bar.

    Args:
        tasks (list or tuple[Iterable, int]): A list of tasks or a
            ``(tasks, total_num)`` pair.
        bar_width (int): Width of the progress bar.
        file: Stream the bar is written to.

    Yields:
        The tasks, unchanged and in order.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num, tasks = tasks[1], tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    bar = ProgressBar(task_num, bar_width, file=file)
    for item in tasks:
        yield item
        bar.update()
    bar.file.write('\n')
class Registry(object):
    """A registry mapping class names (strings) to classes.

    Args:
        name (str): Registry name.
    """

    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    def __len__(self):
        return len(self._module_dict)

    def __repr__(self):
        return (self.__class__.__name__ + '(name={}, items={})'.format(self._name, list(self._module_dict.keys())))

    @property
    def name(self):
        """str: the registry name."""
        return self._name

    @property
    def module_dict(self):
        """dict: the underlying name -> class mapping."""
        return self._module_dict

    def get(self, key):
        """Look up a registered class.

        Args:
            key (str): The class name.

        Returns:
            class or None: The corresponding class, or None if absent.
        """
        return self._module_dict.get(key, None)

    def _register_module(self, module_class, force=False):
        # only classes are accepted
        if not inspect.isclass(module_class):
            raise TypeError('module must be a class, but got {}'.format(type(module_class)))
        module_name = module_class.__name__
        if module_name in self._module_dict and not force:
            raise KeyError('{} is already registered in {}'.format(module_name, self.name))
        self._module_dict[module_name] = module_class

    def register_module(self, cls=None, force=False):
        """Register a class under its own name.

        Usable as a plain call (``reg.register_module(Cls)``), a bare
        decorator (``@reg.register_module``), or a parameterized decorator
        (``@reg.register_module(force=True)``).

        Args:
            cls (class, optional): Class to register; None when used as a
                parameterized decorator.
            force (bool, optional): Override an existing entry of the same
                name. Default: False.

        Returns:
            The registered class (or a decorator when ``cls`` is None).
        """
        if cls is None:
            # parameterized decorator form: defer registration
            return partial(self.register_module, force=force)
        self._register_module(cls, force=force)
        return cls
def build_from_cfg(cfg, registry, default_args=None):
    """Instantiate an object from a config dict.

    Args:
        cfg (dict): Config dict; must contain the key ``"type"`` whose
            value is a class name (looked up in ``registry``) or a class.
        registry (:obj:`Registry`): The registry to search the type in.
        default_args (dict, optional): Defaults merged into ``cfg`` for
            keys it does not already define.

    Returns:
        obj: The constructed object.

    Raises:
        TypeError: On malformed ``cfg``, ``registry`` or ``default_args``.
        KeyError: If the type name is not registered.
    """
    if not (isinstance(cfg, dict) and ('type' in cfg)):
        raise TypeError('cfg must be a dict containing the key "type"')
    if not isinstance(registry, Registry):
        raise TypeError('registry must be an mmcv.Registry object, but got {}'.format(type(registry)))
    if not (isinstance(default_args, dict) or (default_args is None)):
        raise TypeError('default_args must be a dict or None, but got {}'.format(type(default_args)))
    # work on a copy so the caller's cfg is left untouched
    kwargs = cfg.copy()
    obj_type = kwargs.pop('type')
    if is_str(obj_type):
        obj_cls = registry.get(obj_type)
        if obj_cls is None:
            raise KeyError('{} is not in the {} registry'.format(obj_type, registry.name))
    elif inspect.isclass(obj_type):
        obj_cls = obj_type
    else:
        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
    if default_args is not None:
        for key, val in default_args.items():
            kwargs.setdefault(key, val)
    return obj_cls(**kwargs)
class TimerError(Exception):
    """Raised when a :class:`Timer` operation is used while not running."""

    def __init__(self, message):
        # keep the message accessible as an attribute as well
        self.message = message
        super(TimerError, self).__init__(message)
class Timer(object):
    """A flexible Timer class.

    :Example:

    >>> import time
    >>> import mmcv
    >>> with mmcv.Timer():
    >>>     # simulate a code block that will run for 1s
    >>>     time.sleep(1)
    1.000
    >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
    >>>     # simulate a code block that will run for 1s
    >>>     time.sleep(1)
    it takes 1.0 seconds
    >>> timer = mmcv.Timer()
    >>> time.sleep(0.5)
    >>> print(timer.since_start())
    0.500
    >>> time.sleep(0.5)
    >>> print(timer.since_last_check())
    0.500
    >>> print(timer.since_start())
    1.000
    """

    def __init__(self, start=True, print_tmpl=None):
        self._is_running = False
        self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
        if start:
            self.start()

    @property
    def is_running(self):
        """bool: whether the timer is currently running."""
        return self._is_running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        # print the time spent inside the with-block on exit
        print(self.print_tmpl.format(self.since_last_check()))
        self._is_running = False

    def start(self):
        """Start (or re-check) the timer."""
        if not self._is_running:
            # only reset the start time on the first start
            self._t_start = time()
            self._is_running = True
        self._t_last = time()

    def since_start(self):
        """Total time since the timer was started.

        Returns (float): Time in seconds.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        self._t_last = time()
        return self._t_last - self._t_start

    def since_last_check(self):
        """Time since the last checking operation.

        Either :func:`since_start` or :func:`since_last_check` counts as a
        checking operation.

        Returns (float): Time in seconds.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        now = time()
        dur = now - self._t_last
        self._t_last = time()
        return dur
def check_time(timer_id):
    """Add check points in a single line.

    This method is suitable for running a task on a list of items. A timer
    will be registered when the method is called for the first time.

    :Example:

    >>> import time
    >>> import mmcv
    >>> for i in range(1, 6):
    >>>     # simulate a code block
    >>>     time.sleep(i)
    >>>     mmcv.check_time('task1')
    2.000
    3.000
    4.000
    5.000

    Args:
        timer_id (str): Timer identifier.

    Returns:
        float: Seconds since the last check for this id, or 0 on the
        first call.
    """
    if timer_id in _g_timers:
        return _g_timers[timer_id].since_last_check()
    _g_timers[timer_id] = Timer()
    return 0
class Cache(object):
    """A bounded FIFO cache: when full, the oldest inserted entry is evicted.

    Args:
        capacity (int): Maximum number of entries; must be positive.

    Raises:
        ValueError: If ``capacity`` (after int coercion) is not positive.

    Note:
        The original validated the *un-coerced* value after assigning
        state, so e.g. ``Cache(0.5)`` passed validation but stored a
        capacity of 0, making every subsequent ``put`` crash on evicting
        from an empty dict. The coerced value is now validated first,
        before any attribute is set.
    """

    def __init__(self, capacity):
        capacity = int(capacity)
        # validate before touching state so a bad value cannot leave a
        # half-initialized object behind
        if capacity <= 0:
            raise ValueError('capacity must be a positive integer')
        self._cache = OrderedDict()
        self._capacity = capacity

    @property
    def capacity(self):
        """int: maximum number of cached entries."""
        return self._capacity

    @property
    def size(self):
        """int: current number of cached entries."""
        return len(self._cache)

    def put(self, key, val):
        """Insert ``val`` under ``key``, evicting the oldest entry if full.

        An existing key is left untouched (no value update), matching the
        original behavior.
        """
        if key in self._cache:
            return
        if len(self._cache) >= self._capacity:
            # drop the oldest (first-inserted) entry
            self._cache.popitem(last=False)
        self._cache[key] = val

    def get(self, key, default=None):
        """Return the cached value for ``key``, or ``default`` if absent."""
        return self._cache.get(key, default)
class VideoReader(object):
    """Video class with similar usage to a list object.

    This video warpper class provides convenient apis to access frames.
    There exists an issue of OpenCV's VideoCapture class that jumping to a
    certain frame may be inaccurate. It is fixed in this class by checking
    the position after jumping each time.
    Cache is used when decoding videos. So if the same frame is visited for
    the second time, there is no need to decode again if it is stored in the
    cache.

    :Example:

    >>> import mmcv
    >>> v = mmcv.VideoReader('sample.mp4')
    >>> len(v)  # get the total frame number with `len()`
    120
    >>> for img in v:  # v is iterable
    >>>     mmcv.imshow(img)
    >>> v[5]  # get the 6th frame
    """

    def __init__(self, filename, cache_capacity=10):
        check_file_exist(filename, ('Video file not found: ' + filename))
        self._vcap = cv2.VideoCapture(filename)
        assert (cache_capacity > 0)
        # recently decoded frames, keyed by frame index
        self._cache = Cache(cache_capacity)
        # index of the next frame to decode
        self._position = 0
        # static metadata, read once from the capture object
        self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))
        self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))
        self._fps = self._vcap.get(CAP_PROP_FPS)
        self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))
        self._fourcc = self._vcap.get(CAP_PROP_FOURCC)

    @property
    def vcap(self):
        ':obj:`cv2.VideoCapture`: The raw VideoCapture object.'
        return self._vcap

    @property
    def opened(self):
        'bool: Indicate whether the video is opened.'
        return self._vcap.isOpened()

    @property
    def width(self):
        'int: Width of video frames.'
        return self._width

    @property
    def height(self):
        'int: Height of video frames.'
        return self._height

    @property
    def resolution(self):
        'tuple: Video resolution (width, height).'
        return (self._width, self._height)

    @property
    def fps(self):
        'float: FPS of the video.'
        return self._fps

    @property
    def frame_cnt(self):
        'int: Total frames of the video.'
        return self._frame_cnt

    @property
    def fourcc(self):
        'str: "Four character code" of the video.'
        return self._fourcc

    @property
    def position(self):
        'int: Current cursor position, indicating frame decoded.'
        return self._position

    def _get_real_position(self):
        # ask OpenCV for its actual decode cursor
        return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))

    def _set_real_position(self, frame_id):
        # seek, then step forward frame by frame if the seek fell short,
        # working around VideoCapture's inaccurate seeking
        self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)
        pos = self._get_real_position()
        for _ in range((frame_id - pos)):
            self._vcap.read()
        self._position = frame_id

    def read(self):
        """Read the next frame.

        If the next frame have been decoded before and in the cache, then
        return it directly, otherwise decode, cache and return it.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        # NOTE(review): Cache defines no __len__/__bool__, so this
        # truthiness test is always True for a Cache instance
        if self._cache:
            img = self._cache.get(self._position)
            if (img is not None):
                ret = True
            else:
                if (self._position != self._get_real_position()):
                    self._set_real_position(self._position)
                (ret, img) = self._vcap.read()
                if ret:
                    self._cache.put(self._position, img)
        else:
            (ret, img) = self._vcap.read()
        if ret:
            self._position += 1
        return img

    def get_frame(self, frame_id):
        """Get frame by index.

        Args:
            frame_id (int): Index of the expected frame, 0-based.

        Returns:
            ndarray or None: Return the frame if successful, otherwise None.
        """
        if ((frame_id < 0) or (frame_id >= self._frame_cnt)):
            raise IndexError('"frame_id" must be between 0 and {}'.format((self._frame_cnt - 1)))
        if (frame_id == self._position):
            # sequential access: fall back to read()
            return self.read()
        if self._cache:
            img = self._cache.get(frame_id)
            if (img is not None):
                self._position = (frame_id + 1)
                return img
        self._set_real_position(frame_id)
        (ret, img) = self._vcap.read()
        if ret:
            if self._cache:
                self._cache.put(self._position, img)
            self._position += 1
        return img

    def current_frame(self):
        """Get the current frame (frame that is just visited).

        Returns:
            ndarray or None: If the video is fresh, return None, otherwise
                return the frame.
        """
        if (self._position == 0):
            return None
        # only available if the frame is still cached
        return self._cache.get((self._position - 1))

    def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True):
        """Convert a video to frame images.

        Args:
            frame_dir (str): Output directory to store all the frame images.
            file_start (int): Filenames will start from the specified number.
            filename_tmpl (str): Filename template with the index as the
                placeholder.
            start (int): The starting frame index.
            max_num (int): Maximum number of frames to be written.
            show_progress (bool): Whether to show a progress bar.
        """
        mkdir_or_exist(frame_dir)
        if (max_num == 0):
            task_num = (self.frame_cnt - start)
        else:
            task_num = min((self.frame_cnt - start), max_num)
        if (task_num <= 0):
            raise ValueError('start must be less than total frame number')
        if (start > 0):
            self._set_real_position(start)

        def write_frame(file_idx):
            # read sequentially; file_idx only names the output file
            img = self.read()
            filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
            cv2.imwrite(filename, img)
        if show_progress:
            track_progress(write_frame, range(file_start, (file_start + task_num)))
        else:
            for i in range(task_num):
                img = self.read()
                if (img is None):
                    break
                filename = osp.join(frame_dir, filename_tmpl.format((i + file_start)))
                cv2.imwrite(filename, img)

    def __len__(self):
        return self.frame_cnt

    def __getitem__(self, index):
        # list-like access: supports slices and negative indices
        if isinstance(index, slice):
            return [self.get_frame(i) for i in range(*index.indices(self.frame_cnt))]
        if (index < 0):
            index += self.frame_cnt
            if (index < 0):
                raise IndexError('index out of range')
        return self.get_frame(index)

    def __iter__(self):
        # iteration always restarts from the first frame
        self._set_real_position(0)
        return self

    def __next__(self):
        img = self.read()
        if (img is not None):
            return img
        else:
            raise StopIteration
    # Python 2 iterator protocol alias
    next = __next__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # release the underlying VideoCapture on context exit
        self._vcap.release()
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True):
    """Read the frame images from a directory and join them as a video.

    Args:
        frame_dir (str): The directory containing video frames.
        video_file (str): Output filename.
        fps (float): FPS of the output video.
        fourcc (str): Fourcc of the output video, this should be compatible
            with the output file type.
        filename_tmpl (str): Filename template with the index as the
            variable.
        start (int): Starting frame index.
        end (int): Ending frame index (exclusive); 0 means "use all frames
            matching the template's extension".
        show_progress (bool): Whether to show a progress bar.
    """
    if (end == 0):
        # infer the end index by counting files with the template's extension
        ext = filename_tmpl.split('.')[(- 1)]
        end = len([name for name in scandir(frame_dir, ext)])
    first_file = osp.join(frame_dir, filename_tmpl.format(start))
    check_file_exist(first_file, ('The start frame not found: ' + first_file))
    # the first frame determines the output resolution
    img = cv2.imread(first_file)
    (height, width) = img.shape[:2]
    resolution = (width, height)
    vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, resolution)

    def write_frame(file_idx):
        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
        img = cv2.imread(filename)
        vwriter.write(img)
    if show_progress:
        track_progress(write_frame, range(start, end))
    else:
        for i in range(start, end):
            filename = osp.join(frame_dir, filename_tmpl.format(i))
            img = cv2.imread(filename)
            vwriter.write(img)
    vwriter.release()
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
    """Read an optical flow map.

    Args:
        flow_or_path (ndarray or str): A flow map or filepath.
        quantize (bool): whether to read quantized pair, if set to True,
            remaining args will be passed to :func:`dequantize_flow`.
        concat_axis (int): The axis that dx and dy are concatenated,
            can be either 0 or 1. Ignored if quantize is False.

    Returns:
        ndarray: Optical flow represented as a (h, w, 2) numpy array
    """
    if isinstance(flow_or_path, np.ndarray):
        # already-decoded flow: validate the (h, w, 2) shape and pass through
        if ((flow_or_path.ndim != 3) or (flow_or_path.shape[(- 1)] != 2)):
            raise ValueError('Invalid flow with shape {}'.format(flow_or_path.shape))
        return flow_or_path
    elif (not is_str(flow_or_path)):
        raise TypeError('"flow_or_path" must be a filename or numpy array, not {}'.format(type(flow_or_path)))
    if (not quantize):
        # .flo layout: 4-byte 'PIEH' magic, int32 w, int32 h, then
        # w*h*2 float32 values
        with open(flow_or_path, 'rb') as f:
            try:
                header = f.read(4).decode('utf-8')
            except Exception:
                raise IOError('Invalid flow file: {}'.format(flow_or_path))
            else:
                if (header != 'PIEH'):
                    raise IOError('Invalid flow file: {}, header does not contain PIEH'.format(flow_or_path))
            w = np.fromfile(f, np.int32, 1).squeeze()
            h = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, ((w * h) * 2)).reshape((h, w, 2))
    else:
        # quantized flow: a single 2-D image holding dx and dy concatenated
        # along concat_axis
        assert (concat_axis in [0, 1])
        cat_flow = imread(flow_or_path, flag='unchanged')
        if (cat_flow.ndim != 2):
            raise IOError('{} is not a valid quantized flow file, its dimension is {}.'.format(flow_or_path, cat_flow.ndim))
        assert ((cat_flow.shape[concat_axis] % 2) == 0)
        (dx, dy) = np.split(cat_flow, 2, axis=concat_axis)
        flow = dequantize_flow(dx, dy, *args, **kwargs)
    return flow.astype(np.float32)
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
    """Write optical flow to file.

    If the flow is not quantized, it will be saved as a .flo file losslessly,
    otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
    will be concatenated horizontally into a single image if quantize is True.)

    Args:
        flow (ndarray): (h, w, 2) array of optical flow.
        filename (str): Output filepath.
        quantize (bool): Whether to quantize the flow and save it to 2 jpeg
            images. If set to True, remaining args will be passed to
            :func:`quantize_flow`.
        concat_axis (int): The axis that dx and dy are concatenated,
            can be either 0 or 1. Ignored if quantize is False.
    """
    if quantize:
        assert concat_axis in [0, 1]
        dx, dy = quantize_flow(flow, *args, **kwargs)
        dxdy = np.concatenate((dx, dy), axis=concat_axis)
        imwrite(dxdy, filename)
    else:
        # .flo layout: 'PIEH' magic, int32 (w, h), then float32 data
        with open(filename, 'wb') as f:
            f.write('PIEH'.encode('utf-8'))
            np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
            flow.astype(np.float32).tofile(f)
            f.flush()
def quantize_flow(flow, max_val=0.02, norm=True):
    """Quantize flow to [0, 255].

    After this step, the size of flow will be much smaller, and can be
    dumped as jpeg images.

    Args:
        flow (ndarray): (h, w, 2) array of optical flow.
        max_val (float): Maximum value of flow, values beyond
            [-max_val, max_val] will be truncated.
        norm (bool): Whether to divide flow values by image width/height.

    Returns:
        tuple[ndarray]: Quantized dx and dy.
    """
    h, w, _ = flow.shape
    dx = flow[..., 0]
    dy = flow[..., 1]
    if norm:
        # normalize displacements by the image dimensions
        dx = dx / w
        dy = dy / h
    return tuple(quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy])
def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
    """Recover a flow map from quantized dx/dy components.

    Args:
        dx (ndarray): Quantized dx.
        dy (ndarray): Quantized dy.
        max_val (float): Maximum value used when quantizing.
        denorm (bool): Whether to multiply flow values with width/height.

    Returns:
        ndarray: Dequantized (h, w, 2) flow.
    """
    assert dx.shape == dy.shape
    assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
    dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
    if denorm:
        # dx and dy have identical shapes (asserted above), so scaling dy
        # by dx.shape[0] matches the original behavior exactly
        dx *= dx.shape[1]
        dy *= dx.shape[0]
    return np.dstack((dx, dy))
def flow_warp(img, flow, filling_value=0, interpolate_mode='nearest'):
    """Warp an image with an optical flow field.

    Args:
        img (ndarray, float or uint8): Image to be warped.
        flow (ndarray, float): Optical Flow.
        filling_value (int): The missing pixels will be set with
            filling_value.
        interpolate_mode (str): bilinear -> Bilinear Interpolation;
            nearest -> Nearest Neighbor.

    Returns:
        ndarray: Warped image with the same shape of img
    """
    # map the mode name to the integer code expected by the C extension
    mode_codes = {'bilinear': 0, 'nearest': 1}
    assert len(img.shape) == 3
    assert len(flow.shape) == 3 and flow.shape[2] == 2
    assert flow.shape[:2] == img.shape[:2]
    assert interpolate_mode in mode_codes.keys()
    return flow_warp_c(img.astype(np.float64),
                       flow.astype(np.float64),
                       filling_value=filling_value,
                       interpolate_mode=mode_codes[interpolate_mode])
@requires_executable('ffmpeg')
def convert_video(in_file, out_file, print_cmd=False, pre_options='', **kwargs):
    """Convert a video with ffmpeg.

    This provides a general api to ffmpeg, the executed command is::

        `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`

    Options(kwargs) are mapped to ffmpeg commands with the following rules:

    - key=val: "-key val"
    - key=True: "-key"
    - key=False: ""

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        pre_options (str): Options appears before "-i <in_file>".
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    opts = []
    for key, val in kwargs.items():
        if isinstance(val, bool):
            # boolean True becomes a bare flag; False is dropped entirely
            if val:
                opts.append('-{}'.format(key))
        elif key == 'log_level':
            assert val in ['quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace']
            opts.append('-loglevel {}'.format(val))
        else:
            opts.append('-{} {}'.format(key, val))
    cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file, ' '.join(opts), out_file)
    if print_cmd:
        print(cmd)
    subprocess.call(cmd, shell=True)
@requires_executable('ffmpeg')
def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False, **kwargs):
    """Resize a video with ffmpeg's scale filter.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).
        ratio (tuple or float): Expected resize ratio, (2, 0.5) means
            (w*2, h*0.5).
        keep_ar (bool): Whether to keep original aspect ratio.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.

    Raises:
        ValueError: If neither or both of ``size`` and ``ratio`` are given.
    """
    if size is None and ratio is None:
        raise ValueError('expected size or ratio must be specified')
    if size is not None and ratio is not None:
        raise ValueError('size and ratio cannot be specified at the same time')
    opts = {'log_level': log_level}
    if size:
        if keep_ar:
            opts['vf'] = 'scale=w={}:h={}:force_original_aspect_ratio=decrease'.format(size[0], size[1])
        else:
            opts['vf'] = 'scale={}:{}'.format(size[0], size[1])
    else:
        # a scalar ratio applies to both dimensions
        if not isinstance(ratio, tuple):
            ratio = (ratio, ratio)
        opts['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format(ratio[0], ratio[1])
    convert_video(in_file, out_file, print_cmd, **opts)
@requires_executable('ffmpeg')
def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs):
    """Cut a clip from a video with ffmpeg.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        start (None or float): Start time (in seconds).
        end (None or float): End time (in seconds).
        vcodec (None or str): Output video codec, None for unchanged.
        acodec (None or str): Output audio codec, None for unchanged.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    opts = {'log_level': log_level}
    # default to stream copy when codecs are left unspecified
    if vcodec is None:
        opts['vcodec'] = 'copy'
    if acodec is None:
        opts['acodec'] = 'copy'
    if start:
        opts['ss'] = start
    else:
        start = 0
    if end:
        # ffmpeg takes a duration, not an end timestamp
        opts['t'] = end - start
    convert_video(in_file, out_file, print_cmd, **opts)
@requires_executable('ffmpeg')
def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs):
    """Concatenate multiple videos into a single one via ffmpeg's concat demuxer.

    Args:
        video_list (list): A list of video filenames
        out_file (str): Output video filename
        vcodec (None or str): Output video codec, None for unchanged
        acodec (None or str): Output audio codec, None for unchanged
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    # write a temporary list file in the format the concat demuxer expects
    _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
    with open(tmp_filename, 'w') as f:
        for filename in video_list:
            f.write('file {}\n'.format(osp.abspath(filename)))
    opts = {'log_level': log_level}
    if vcodec is None:
        opts['vcodec'] = 'copy'
    if acodec is None:
        opts['acodec'] = 'copy'
    convert_video(tmp_filename, out_file, print_cmd, pre_options='-f concat -safe 0', **opts)
    os.remove(tmp_filename)
class Color(Enum):
    """An enum that defines common colors.

    Contains red, green, blue, cyan, yellow, magenta, white and black.
    Values are 3-tuples in BGR channel order (red is (0, 0, 255)), each
    channel in [0, 255].
    """
    red = (0, 0, 255)
    green = (0, 255, 0)
    blue = (255, 0, 0)
    cyan = (255, 255, 0)
    yellow = (0, 255, 255)
    magenta = (255, 0, 255)
    white = (255, 255, 255)
    black = (0, 0, 0)
def color_val(color):
    """Convert various input to color tuples.

    Args:
        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs

    Returns:
        tuple[int]: A tuple of 3 integers indicating BGR channels.
    """
    if is_str(color):
        return Color[color].value
    if isinstance(color, Color):
        return color.value
    if isinstance(color, tuple):
        assert len(color) == 3
        for channel in color:
            assert 0 <= channel <= 255
        return color
    if isinstance(color, int):
        # A single int is interpreted as a gray value.
        assert 0 <= color <= 255
        return (color, color, color)
    if isinstance(color, np.ndarray):
        assert color.ndim == 1 and color.size == 3
        assert np.all((color >= 0) & (color <= 255))
        return tuple(color.astype(np.uint8))
    raise TypeError('Invalid type for color: {}'.format(type(color)))
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize the distributed environment for the given launcher."""
    if mp.get_start_method(allow_none=True) is None:
        # CUDA does not play well with the default 'fork' start method.
        mp.set_start_method('spawn')
    init_fns = {
        'pytorch': _init_dist_pytorch,
        'mpi': _init_dist_mpi,
        'slurm': _init_dist_slurm,
    }
    if launcher not in init_fns:
        raise ValueError('Invalid launcher type: {}'.format(launcher))
    init_fns[launcher](backend, **kwargs)
def _init_dist_pytorch(backend, **kwargs):
    """Initialize a process group launched with `torch.distributed.launch`."""
    # Bind this process to a GPU chosen round-robin by its global rank.
    rank = int(os.environ['RANK'])
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_mpi(backend, **kwargs):
    """Initialize distributed training via MPI (not implemented)."""
    raise NotImplementedError
def _init_dist_slurm(backend, **kwargs):
    """Initialize distributed training via slurm (not implemented)."""
    raise NotImplementedError
def set_random_seed(seed):
    """Seed the Python, NumPy and PyTorch (CPU and all GPUs) RNGs.

    Args:
        seed (int): Seed to use for every generator.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def get_root_logger(log_level=logging.INFO):
    """Return the root logger, configuring it on first use.

    Non-master ranks are raised to ERROR level so that only rank 0 emits
    regular log output.

    Args:
        log_level (int): Logging level used when the root logger has no
            handlers yet.

    Returns:
        logging.Logger: The (possibly newly configured) root logger.
    """
    logger = logging.getLogger()
    if not logger.hasHandlers():
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(message)s',
            level=log_level)
    rank, _ = get_dist_info()
    if rank != 0:
        logger.setLevel('ERROR')
    return logger
def _prepare_data(img, img_transform, cfg, device):
    """Transform one image and pack it the way the detector forward expects."""
    ori_shape = img.shape
    img, img_shape, pad_shape, scale_factor = img_transform(
        img, scale=cfg.data.test.img_scale)
    img = to_tensor(img).to(device).unsqueeze(0)
    meta = dict(
        ori_shape=ori_shape,
        img_shape=img_shape,
        pad_shape=pad_shape,
        scale_factor=scale_factor,
        flip=False)
    # Lists of per-GPU samples, each holding a single image here.
    return dict(img=[img], img_meta=[[meta]])
def _inference_single(model, img, img_transform, cfg, device):
    """Run the detector on a single image without tracking gradients."""
    data = _prepare_data(mmcv.imread(img), img_transform, cfg, device)
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **data)
def _inference_generator(model, imgs, img_transform, cfg, device):
    """Lazily yield one detection result per input image."""
    for one_img in imgs:
        yield _inference_single(model, one_img, img_transform, cfg, device)
def inference_detector(model, imgs, cfg, device='cuda:0'):
    """Run a detector on one image or, lazily, on a list of images."""
    img_transform = ImageTransform(
        size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
    model = model.to(device)
    model.eval()
    if isinstance(imgs, list):
        return _inference_generator(model, imgs, img_transform, cfg, device)
    return _inference_single(model, imgs, img_transform, cfg, device)
def show_result(img, result, dataset='coco', score_thr=0.3):
    """Draw per-class detection results on an image and display it."""
    class_names = get_classes(dataset)
    bboxes = np.vstack(result)
    # result is a per-class list of (n, 5) arrays; label i applies to the
    # i-th array's boxes.
    labels = np.concatenate([
        np.full(bbox.shape[0], label, dtype=np.int32)
        for label, bbox in enumerate(result)
    ])
    img = mmcv.imread(img)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr)
def parse_losses(losses):
    """Reduce raw loss terms to scalars and a single total loss.

    Args:
        losses (dict): Maps a loss/metric name to a tensor or a list of
            tensors.

    Returns:
        tuple: ``(loss, log_vars)`` where ``loss`` is the sum of every
        entry whose name contains 'loss' and ``log_vars`` maps all names
        (plus 'loss') to python floats.
    """
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            log_vars[name] = sum(v.mean() for v in value)
        else:
            raise TypeError(
                '{} is not a tensor or list of tensors'.format(name))
    # Only entries whose key mentions 'loss' contribute to the total.
    loss = sum(value for key, value in log_vars.items() if 'loss' in key)
    log_vars['loss'] = loss
    for name in log_vars:
        log_vars[name] = log_vars[name].item()
    return loss, log_vars
def batch_processor(model, data, train_mode):
    """Run one forward pass and package losses for the Runner."""
    loss, log_vars = parse_losses(model(**data))
    return dict(
        loss=loss,
        log_vars=log_vars,
        num_samples=len(data['img'].data))
def train_detector(model, dataset, cfg, distributed=False, validate=False,
                   logger=None):
    """Entry point dispatching to distributed or single-machine training."""
    if logger is None:
        logger = get_root_logger(cfg.log_level)
    train_fn = _dist_train if distributed else _non_dist_train
    train_fn(model, dataset, cfg, validate=validate)
def _dist_train(model, dataset, cfg, validate=False):
    """Train a detector with MMDistributedDataParallel.

    Hook registration order matters: training hooks first, then the
    sampler-seed hook, then an optional evaluation hook chosen from the
    validation dataset type.
    """
    # Prepare data loaders (one loader; workflow decides usage).
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            dist=True)
    ]
    # Put the model on GPUs.
    model = MMDistributedDataParallel(model.cuda())
    # Build the runner around the shared batch_processor.
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    # DistOptimizerHook all-reduces gradients before each optimizer step.
    optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    runner.register_hook(DistSamplerSeedHook())
    # Register an evaluation hook matching the validation dataset.
    if validate:
        if isinstance(model.module, RPN):
            # RPN-only models are evaluated by proposal recall.
            runner.register_hook(CocoDistEvalRecallHook(cfg.data.val))
        elif cfg.data.val.type == 'CocoDataset':
            runner.register_hook(CocoDistEvalmAPHook(cfg.data.val))
        elif cfg.data.val.type in ['KittiLiDAR', 'KittiRGB']:
            runner.register_hook(
                KittiEvalmAPHook(cfg.data.val, interval=cfg.eval_interval))
        else:
            runner.register_hook(DistEvalmAPHook(cfg.data.val))
    # Resuming takes precedence over loading initial weights.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def _non_dist_train(model, dataset, cfg, validate=False):
    """Train a detector with MMDataParallel on a single machine.

    Mirrors ``_dist_train`` but without gradient all-reduce or the
    sampler-seed hook.
    """
    # Prepare data loaders.
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # Put the model on GPUs.
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # Build the runner.
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    # NOTE(review): evaluation hooks below are the *Dist* variants even in
    # the non-distributed path -- presumably they degrade to a single rank;
    # confirm before relying on non-dist validation.
    if validate:
        if isinstance(model.module, RPN):
            runner.register_hook(CocoDistEvalRecallHook(cfg.data.val))
        elif cfg.data.val.type == 'CocoDataset':
            runner.register_hook(CocoDistEvalmAPHook(cfg.data.val))
        elif cfg.data.val.type in ['KittiLiDAR', 'KittiRGB']:
            runner.register_hook(
                KittiEvalmAPHook(cfg.data.val, interval=cfg.eval_interval))
        else:
            runner.register_hook(DistEvalmAPHook(cfg.data.val))
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
class TargetEncoder(object):
    """Assigns anchors to ground-truth boxes and encodes regression targets.

    Args:
        box_coders (str): Name of a box coder class in ``boxCoders``.
        region_similarity (str): Name of a similarity measure class in
            ``regionSimilarity``.
    """

    def __init__(self, box_coders, region_similarity):
        self._sim_fn = getattr(regionSimilarity, region_similarity)()
        self._coder = getattr(boxCoders, box_coders)()

    @property
    def box_coder(self):
        return self._coder

    def assign(self, anchors, gt_boxes, anchors_mask=None, gt_classes=None,
               pos_iou_thr=0.6, neg_iou_thr=0.45, positive_fraction=None,
               sample_size=512):
        """Create classification/regression targets for the given anchors."""
        return create_target_np(
            anchors,
            gt_boxes,
            anchors_mask,
            gt_classes,
            similarity_fn=self._sim_fn,
            box_encoding_fn=self._coder.encode,
            matched_threshold=pos_iou_thr,
            unmatched_threshold=neg_iou_thr,
            positive_fraction=positive_fraction,
            rpn_batch_size=sample_size,
            norm_by_num_examples=False,
            box_code_size=self.box_coder.code_size)
def voc_classes():
    """Return the 20 PASCAL VOC object class names."""
    return [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
        'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
        'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
def imagenet_det_classes():
    """Return the 200 ImageNet DET challenge class names."""
    return [
        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
        'banana', 'band_aid', 'banjo', 'baseball', 'basketball',
        'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench',
        'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl',
        'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener',
        'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair',
        'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard',
        'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch',
        'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher',
        'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell',
        'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet',
        'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan',
        'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole',
        'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer',
        'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
        'lobster', 'maillot', 'maraca', 'microphone', 'microwave',
        'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail',
        'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box',
        'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple',
        'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack',
        'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel',
        'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket',
        'ray', 'red_panda', 'refrigerator', 'remote_control',
        'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker',
        'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski',
        'skunk', 'snail', 'snake', 'snowmobile', 'snowplow',
        'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel',
        'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry',
        'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe',
        'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger',
        'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
        'whale', 'wine_bottle', 'zebra'
    ]
def imagenet_vid_classes():
    """Return the 30 ImageNet VID challenge class names."""
    return [
        'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
        'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
        'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle',
        'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',
        'train', 'turtle', 'watercraft', 'whale', 'zebra'
    ]
def coco_classes():
    """Return the 80 COCO object class names (detection label order)."""
    return [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
        'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
        'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
        'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
        'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
        'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
        'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
        'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
        'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
def kitti_classes():
    """Return the KITTI class names used by this codebase."""
    # NOTE(review): official KITTI labels are singular ('Pedestrian',
    # 'Cyclist'); these lowercase plural names are this project's own
    # convention -- do not change without updating label consumers.
    return ['car', 'pedestrians', 'cyclists']
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Dataset name or one of its aliases registered in
            ``dataset_aliases``.

    Returns:
        list[str]: Class names of the dataset.

    Raises:
        ValueError: If the name/alias is not registered.
        TypeError: If ``dataset`` is not a string.
    """
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if not mmcv.is_str(dataset):
        raise TypeError('dataset must a str, but got {}'.format(type(dataset)))
    if dataset not in alias2name:
        raise ValueError('Unrecognized dataset: {}'.format(dataset))
    # BUGFIX/idiom: look the loader function up by name in this module
    # instead of eval(), which would execute arbitrary strings coming from
    # dataset_aliases.
    return globals()[alias2name[dataset] + '_classes']()
def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)):
    """Evaluate COCO-format detection results with the official COCO API."""
    allowed = ['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints']
    for res_type in result_types:
        assert res_type in allowed
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    if result_types == ['proposal_fast']:
        # Fast recall path works directly on pickled proposals.
        ar = fast_eval_recall(result_file, coco, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return
    assert result_file.endswith('.json')
    coco_dets = coco.loadRes(result_file)
    img_ids = coco.getImgIds()
    for res_type in result_types:
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            # Class-agnostic evaluation with custom proposal budgets.
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
def fast_eval_recall(results, coco, max_dets,
                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
    """Compute average recall of proposals against COCO ground truth."""
    if mmcv.is_str(results):
        assert results.endswith('.pkl')
        results = mmcv.load(results)
    elif not isinstance(results, list):
        raise TypeError(
            'results must be a list of numpy arrays or a filename, not {}'.format(type(results)))
    gt_bboxes = []
    img_ids = coco.getImgIds()
    for i in range(len(img_ids)):
        ann_info = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[i]))
        boxes = []
        for ann in ann_info:
            # Skip ignored and crowd annotations.
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            boxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
        boxes = np.array(boxes, dtype=np.float32)
        if boxes.shape[0] == 0:
            boxes = np.zeros((0, 4))
        gt_bboxes.append(boxes)
    recalls = eval_recalls(
        gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
    # Average over IoU thresholds, one AR per proposal budget.
    return recalls.mean(axis=1)
def xyxy2xywh(bbox):
    """Convert an inclusive-corner [x1, y1, x2, y2, ...] box to COCO
    [x, y, w, h] format (the +1 accounts for inclusive corners)."""
    x1, y1, x2, y2 = bbox.tolist()[:4]
    return [x1, y1, x2 - x1 + 1, y2 - y1 + 1]
def proposal2json(dataset, results):
    """Convert proposal arrays to COCO json-style dicts (category_id 1)."""
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        for bbox in results[idx]:
            json_results.append(
                dict(
                    image_id=img_id,
                    bbox=xyxy2xywh(bbox),
                    score=float(bbox[4]),
                    category_id=1))
    return json_results
def det2json(dataset, results):
    """Convert per-class detection arrays to COCO json-style dicts."""
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        for label, bboxes in enumerate(results[idx]):
            for bbox in bboxes:
                json_results.append(
                    dict(
                        image_id=img_id,
                        bbox=xyxy2xywh(bbox),
                        score=float(bbox[4]),
                        category_id=dataset.cat_ids[label]))
    return json_results
def segm2json(dataset, results):
    """Convert (det, seg) result pairs to COCO json-style dicts with RLE."""
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            bboxes = det[label]
            segms = seg[label]
            for i in range(bboxes.shape[0]):
                # pycocotools RLE 'counts' is bytes; decode for json dumping.
                segms[i]['counts'] = segms[i]['counts'].decode()
                json_results.append(
                    dict(
                        image_id=img_id,
                        bbox=xyxy2xywh(bboxes[i]),
                        score=float(bboxes[i][4]),
                        category_id=dataset.cat_ids[label],
                        segmentation=segms[i]))
    return json_results
def results2json(dataset, results, out_file):
    """Serialize detection results to a COCO-style json file.

    The converter is chosen by the type of the first result: list ->
    per-class detections, tuple -> detections + masks, ndarray -> proposals.
    """
    first = results[0]
    if isinstance(first, list):
        json_results = det2json(dataset, results)
    elif isinstance(first, tuple):
        json_results = segm2json(dataset, results)
    elif isinstance(first, np.ndarray):
        json_results = proposal2json(dataset, results)
    else:
        raise TypeError('invalid type of results')
    mmcv.dump(json_results, out_file)
class DistEvalHook(Hook):
    """Base hook that runs distributed evaluation after training epochs.

    Each rank evaluates a strided subset of the dataset; partial results
    are exchanged through temp pickle files in the work dir, merged on
    rank 0, and passed to :meth:`evaluate` (implemented by subclasses).
    Synchronization uses a file-based barrier instead of
    ``torch.distributed.barrier()``.
    """

    def __init__(self, dataset, interval=1):
        # `dataset` may be a ready Dataset or a config dict to build one
        # in test mode.
        if isinstance(dataset, Dataset):
            self.dataset = dataset
        elif isinstance(dataset, dict):
            self.dataset = obj_from_dict(dataset, datasets,
                                         {'test_mode': True})
        else:
            raise TypeError(
                'dataset must be a Dataset object or a dict, not {}'.format(
                    type(dataset)))
        self.interval = interval
        self.lock_dir = None

    def _barrier(self, rank, world_size):
        """Due to some issues with `torch.distributed.barrier()`, we have to
        implement this ugly barrier function.
        """
        if rank == 0:
            # Rank 0 waits for every worker's lock file, then removes them,
            # which releases the workers spinning below.
            for i in range(1, world_size):
                tmp = osp.join(self.lock_dir, '{}.pkl'.format(i))
                while not osp.exists(tmp):
                    time.sleep(1)
            for i in range(1, world_size):
                tmp = osp.join(self.lock_dir, '{}.pkl'.format(i))
                os.remove(tmp)
        else:
            # Workers announce arrival by dropping a lock file and wait
            # until rank 0 deletes it.
            tmp = osp.join(self.lock_dir, '{}.pkl'.format(rank))
            mmcv.dump([], tmp)
            while osp.exists(tmp):
                time.sleep(1)

    def before_run(self, runner):
        # Fresh lock directory per run, created by rank 0 only.
        self.lock_dir = osp.join(runner.work_dir, '.lock_map_hook')
        if runner.rank == 0:
            if osp.exists(self.lock_dir):
                shutil.rmtree(self.lock_dir)
            mmcv.mkdir_or_exist(self.lock_dir)

    def after_run(self, runner):
        if runner.rank == 0:
            shutil.rmtree(self.lock_dir)

    def after_train_epoch(self, runner):
        if not self.every_n_epochs(runner, self.interval):
            return
        runner.model.eval()
        results = [None for _ in range(len(self.dataset))]
        prog_bar = mmcv.ProgressBar(len(self.dataset))
        # Each rank handles indices rank, rank+world_size, ...
        for idx in range(runner.rank, len(self.dataset), runner.world_size):
            data = self.dataset[idx]
            data_gpu = scatter(
                collate([data], samples_per_gpu=1),
                [torch.cuda.current_device()])[0]
            with torch.no_grad():
                result = runner.model(
                    return_loss=False, rescale=True, **data_gpu)
            results[idx] = result
            # One bar tick per dataset item processed across all ranks.
            batch_size = runner.world_size
            for _ in range(batch_size):
                prog_bar.update()
        if runner.rank == 0:
            print('\n')
            # Wait for workers to dump their partial results, then merge.
            self._barrier(runner.rank, runner.world_size)
            for i in range(1, runner.world_size):
                tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
                tmp_results = mmcv.load(tmp_file)
                for idx in range(i, len(results), runner.world_size):
                    results[idx] = tmp_results[idx]
                os.remove(tmp_file)
            self.evaluate(runner, results)
        else:
            tmp_file = osp.join(runner.work_dir,
                                'temp_{}.pkl'.format(runner.rank))
            mmcv.dump(results, tmp_file)
            self._barrier(runner.rank, runner.world_size)
        # Final barrier so no rank starts the next epoch early.
        self._barrier(runner.rank, runner.world_size)

    # NOTE(review): subclasses are invoked as evaluate(runner, results);
    # this placeholder's signature differs, which is fine only because it
    # is always overridden -- confirm before instantiating this base class.
    def evaluate(self):
        raise NotImplementedError
class CocoDistEvalRecallHook(DistEvalHook):
    """Distributed hook that evaluates proposal average recall on COCO."""

    def __init__(self, dataset,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        super(CocoDistEvalRecallHook, self).__init__(dataset)
        self.proposal_nums = np.array(proposal_nums, dtype=np.int32)
        self.iou_thrs = np.array(iou_thrs, dtype=np.float32)

    def evaluate(self, runner, results):
        # One AR value per proposal budget, logged as e.g. 'AR@100'.
        ar = fast_eval_recall(results, self.dataset.coco, self.proposal_nums,
                              self.iou_thrs)
        for idx, num in enumerate(self.proposal_nums):
            runner.log_buffer.output['AR@{}'.format(num)] = ar[idx]
        runner.log_buffer.ready = True
class CocoDistEvalmAPHook(DistEvalHook):
    """Distributed hook reporting COCO mAP (bbox and, with masks, segm)."""

    def evaluate(self, runner, results):
        tmp_file = osp.join(runner.work_dir, 'temp_0.json')
        results2json(self.dataset, results, tmp_file)
        if runner.model.module.with_mask:
            res_types = ['bbox', 'segm']
        else:
            res_types = ['bbox']
        cocoGt = self.dataset.coco
        cocoDt = cocoGt.loadRes(tmp_file)
        imgIds = cocoGt.getImgIds()
        for res_type in res_types:
            cocoEval = COCOeval(cocoGt, cocoDt, res_type)
            cocoEval.params.imgIds = imgIds
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            # stats[0] is AP averaged over IoU=0.50:0.95.
            field = '{}_mAP'.format(res_type)
            runner.log_buffer.output[field] = cocoEval.stats[0]
        runner.log_buffer.ready = True
        os.remove(tmp_file)
class DistEvalmAPHook(DistEvalHook):
    """Distributed hook that evaluates PASCAL-VOC-style mAP."""

    def evaluate(self, runner, results):
        gt_bboxes = []
        gt_labels = []
        # Crowd regions are collected as "ignore" boxes when available.
        gt_ignore = [] if self.dataset.with_crowd else None
        for i in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(i)
            bboxes = ann['bboxes']
            labels = ann['labels']
            if gt_ignore is not None:
                # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed
                # in 1.24; use the builtin bool dtype (same semantics).
                ignore = np.concatenate([
                    np.zeros(bboxes.shape[0], dtype=bool),
                    np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
                ])
                gt_ignore.append(ignore)
                bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
                labels = np.concatenate([labels, ann['labels_ignore']])
            gt_bboxes.append(bboxes)
            gt_labels.append(labels)
        # VOC2007 uses the 11-point metric; otherwise pass class names.
        if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
            ds_name = 'voc07'
        else:
            ds_name = self.dataset.CLASSES
        mean_ap, eval_results = eval_map(
            results,
            gt_bboxes,
            gt_labels,
            gt_ignore=gt_ignore,
            scale_ranges=None,
            iou_thr=0.5,
            dataset=ds_name,
            print_summary=True)
        runner.log_buffer.output['mAP'] = mean_ap
        runner.log_buffer.ready = True
class KittiEvalmAPHook(Hook):
    """Hook that evaluates KITTI-style mAP every ``interval`` epochs.

    Rank 0 runs inference over the whole validation loader, converts
    network output to KITTI annotation dicts and feeds them to the
    official evaluation; other ranks only wait at the file-based barrier.
    """

    def __init__(self, dataset, interval=5):
        # `dataset` may be a ready Dataset or a config dict to build one.
        if isinstance(dataset, Dataset):
            self.dataset = dataset
        elif isinstance(dataset, dict):
            self.dataset = utils.get_dataset(dataset)
        else:
            raise TypeError(
                'dataset must be a Dataset object or a dict, not {}'.format(
                    type(dataset)))
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=4,
            num_workers=2,
            shuffle=False,
            collate_fn=utils.merge_second_batch)
        self.interval = interval
        self.lock_dir = None

    def _barrier(self, rank, world_size):
        """Due to some issues with `torch.distributed.barrier()`, we have to
        implement this ugly barrier function.
        """
        if rank == 0:
            # Rank 0 waits for every worker's lock file, then removes them,
            # releasing the workers spinning below.
            for i in range(1, world_size):
                tmp = osp.join(self.lock_dir, '{}.pkl'.format(i))
                while not osp.exists(tmp):
                    time.sleep(1)
            for i in range(1, world_size):
                tmp = osp.join(self.lock_dir, '{}.pkl'.format(i))
                os.remove(tmp)
        else:
            tmp = osp.join(self.lock_dir, '{}.pkl'.format(rank))
            mmcv.dump([], tmp)
            while osp.exists(tmp):
                time.sleep(1)

    def before_run(self, runner):
        self.lock_dir = osp.join(runner.work_dir, '.lock_map_hook')
        if runner.rank == 0:
            if osp.exists(self.lock_dir):
                shutil.rmtree(self.lock_dir)
            mmcv.mkdir_or_exist(self.lock_dir)

    def after_run(self, runner):
        if runner.rank == 0:
            shutil.rmtree(self.lock_dir)

    def after_train_epoch(self, runner):
        if not self.every_n_epochs(runner, self.interval):
            return
        if runner.rank == 0:
            runner.model.eval()
            prog_bar = mmcv.ProgressBar(len(self.dataset))
            class_names = get_classes('kitti')
            results = []
            for i, data in enumerate(self.dataloader):
                with torch.no_grad():
                    result = runner.model(return_loss=False, **data)
                # NOTE(review): image size is hard-coded to the common KITTI
                # resolution (375, 1242) -- confirm for other splits/cameras.
                image_shape = (375, 1242)
                for re in result:
                    img_idx = re['image_idx']
                    if re['bbox'] is not None:
                        box2d = re['bbox']
                        box3d = re['box3d_camera']
                        labels = re['label_preds']
                        scores = re['scores']
                        alphas = re['alphas']
                        anno = kitti.get_start_result_anno()
                        num_example = 0
                        for bbox2d, bbox3d, label, score, alpha in zip(
                                box2d, box3d, labels, scores, alphas):
                            # Drop boxes entirely outside the image.
                            if bbox2d[0] > image_shape[1] or bbox2d[1] > image_shape[0]:
                                continue
                            if bbox2d[2] < 0 or bbox2d[3] < 0:
                                continue
                            # Clip the box to the image canvas.
                            bbox2d[2:] = np.minimum(bbox2d[2:], image_shape[::-1])
                            bbox2d[:2] = np.maximum(bbox2d[:2], [0, 0])
                            anno['name'].append(class_names[label])
                            anno['truncated'].append(0.0)
                            anno['occluded'].append(0)
                            anno['alpha'].append(alpha)
                            anno['bbox'].append(bbox2d)
                            anno['dimensions'].append(bbox3d[[3, 4, 5]])
                            anno['location'].append(bbox3d[:3])
                            anno['rotation_y'].append(bbox3d[6])
                            anno['score'].append(score)
                            num_example += 1
                        if num_example != 0:
                            anno = {n: np.stack(v) for n, v in anno.items()}
                            results.append(anno)
                        else:
                            results.append(kitti.empty_result_anno())
                    else:
                        results.append(kitti.empty_result_anno())
                    # Tag every kept box of this image with its index.
                    num_example = results[-1]['name'].shape[0]
                    results[-1]['image_idx'] = np.array(
                        [img_idx] * num_example, dtype=np.int64)
                batch_size = len(data['sample_idx'])
                for _ in range(batch_size):
                    prog_bar.update()
            self._barrier(runner.rank, runner.world_size)
            self.evaluate(runner, results)
        else:
            self._barrier(runner.rank, runner.world_size)

    def evaluate(self, runner, results):
        # Compare against ground-truth label files shipped with the dataset.
        gt_annos = kitti.get_label_annos(self.dataset.label_prefix,
                                         self.dataset.sample_ids)
        result = get_official_eval_result(gt_annos, results,
                                          current_classes=0)
        runner.logger.info(result)
        runner.log_buffer.ready = True
def weighted_nll_loss(pred, label, weight, avg_factor=None):
    """Per-element weighted negative log-likelihood loss.

    Args:
        pred (Tensor): Log-probabilities of shape (N, C).
        label (Tensor): Target class indices of shape (N,).
        weight (Tensor): Per-element weights.
        avg_factor (float, optional): Normalizer; defaults to the number
            of positively weighted elements (at least 1).

    Returns:
        Tensor: 1-element tensor holding the averaged loss.
    """
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    weighted = F.nll_loss(pred, label, reduction='none') * weight
    return weighted.sum()[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
    """Per-element weighted cross-entropy on logits.

    Args:
        pred (Tensor): Logits of shape (N, C).
        label (Tensor): Target class indices of shape (N,).
        weight (Tensor): Per-element weights.
        avg_factor (float, optional): Normalizer; defaults to the number
            of positively weighted elements (at least 1).
        reduce (bool): Sum into a 1-element tensor when True; otherwise
            return the normalized per-element losses.
    """
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    weighted = F.cross_entropy(pred, label, reduction='none') * weight
    if reduce:
        return weighted.sum()[None] / avg_factor
    return weighted / avg_factor
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
    """Weighted binary cross-entropy on logits, averaged by avg_factor."""
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    total = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight.float(), reduction='sum')
    return total[None] / avg_factor
def sigmoid_focal_loss(pred, target, weight, gamma=2.0, alpha=0.25,
                       reduction='mean'):
    """Sigmoid focal loss on raw logits.

    Args:
        pred (Tensor): Logits.
        target (Tensor): Binary targets, broadcastable to ``pred``.
        weight (Tensor): Per-element weights.
        gamma (float): Focusing parameter.
        alpha (float): Positive/negative balancing factor.
        reduction (str): 'none' | 'mean' | 'sum'.
    """
    prob = pred.sigmoid()
    target = target.type_as(pred)
    # Probability assigned to the wrong outcome; drives the focal term.
    pt = (1 - prob) * target + prob * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * weight
    focal_weight = focal_weight * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:  # 'none'
        return loss
    if reduction_enum == 1:  # 'mean'
        return loss.mean()
    return loss.sum()  # 'sum'
def weighted_sigmoid_focal_loss(pred, target, weight, gamma=2.0, alpha=0.25,
                                avg_factor=None, num_classes=80):
    """Focal loss summed over elements, normalized by positives/num_classes."""
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-06
    total = sigmoid_focal_loss(
        pred, target, weight, gamma=gamma, alpha=alpha, reduction='sum')
    return total[None] / avg_factor
def mask_cross_entropy(pred, target, label):
    """BCE on the mask channel selected by each RoI's class label."""
    num_rois = pred.size(0)
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # Pick each RoI's predicted mask for its ground-truth class.
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, reduction='mean')[None]
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
    """Smooth L1 loss: quadratic below ``beta``, linear above.

    Note 'mean' divides the summed loss by the total element count.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta,
                       0.5 * diff * diff / beta,
                       diff - 0.5 * beta)
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:  # 'none'
        return loss
    if reduction_enum == 1:  # 'mean'
        return loss.sum() / pred.numel()
    return loss.sum()  # 'sum'
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    """Weighted smooth-L1; default normalizer assumes 4 coords per box."""
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
    loss = smooth_l1_loss(pred, target, beta, reduction='none') * weight
    return loss.sum()[None] / avg_factor
def l1_loss(pred, target, reduction='mean'):
    """Element-wise L1 loss with the usual none/mean/sum reductions."""
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:  # 'none'
        return loss
    if reduction_enum == 1:  # 'mean'
        return loss.sum() / pred.numel()
    return loss.sum()  # 'sum'
def weighted_l1(pred, target, weight, avg_factor=None):
    """Weighted L1; default normalizer assumes 4 coords per box."""
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
    loss = l1_loss(pred, target, reduction='none') * weight
    return loss.sum()[None] / avg_factor
def accuracy(pred, target, topk=1): if isinstance(topk, int): topk = (topk,) return_single = True else: return_single = False maxk = max(topk) (_, pred_label) = pred.topk(maxk, 1, True, True) pred_label = pred_label.t() correct = pred_label.eq(target.view(1, (- 1)).expand_as(pred_label)) res = [] for k in topk: correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True) res.append(correct_k.mul_((100.0 / pred.size(0)))) return (res[0] if return_single else res)
def huber_loss(error, delta):
    """Mean Huber loss: quadratic within ``delta``, linear outside."""
    abs_error = torch.abs(error)
    clipped = abs_error.clamp(max=delta)
    losses = 0.5 * clipped ** 2 + delta * (abs_error - clipped)
    return torch.mean(losses)
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split combined 1-D polygons back into per-mask polygon lists.

    A mask is a list of polygons and each polygon a 1-D array; the dataset
    concatenates everything into one tensor per image, and this reverses
    that packing.

    Args:
        polys (list): Per-image 1-D tensors of concatenated polygons.
        poly_lens (list): Per-image tensors of each polygon's length.
        polys_per_mask (list): Per-image tensors of polygon counts per mask.

    Returns:
        list: per image -> per mask -> list of polygon arrays.
    """
    mask_polys_list = []
    for img_id in range(len(polys)):
        img_polys = polys[img_id]
        lens = poly_lens[img_id].tolist()
        per_mask = polys_per_mask[img_id].tolist()
        # First cut into individual polygons, then group them by mask.
        split_polys = mmcv.slice_list(img_polys, lens)
        mask_polys_list.append(mmcv.slice_list(split_polys, per_mask))
    return mask_polys_list
class VoxelGenerator(object):
    """Groups raw points into fixed-capacity voxels over a cropped range.

    Args:
        voxel_size (list[float]): Size of one voxel along (x, y, z).
        point_cloud_range (list[float]): [x_min, y_min, z_min,
            x_max, y_max, z_max] crop of the cloud.
        max_num_points (int): Max points kept per voxel.
        max_voxels (int): Max number of voxels to produce.
    """

    def __init__(self, voxel_size, point_cloud_range, max_num_points,
                 max_voxels=20000):
        point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
        voxel_size = np.array(voxel_size, dtype=np.float32)
        # Number of voxels along each axis.
        grid_size = np.round(
            (point_cloud_range[3:] - point_cloud_range[:3]) /
            voxel_size).astype(np.int64)
        self._voxel_size = voxel_size
        self._point_cloud_range = point_cloud_range
        self._max_num_points = max_num_points
        self._max_voxels = max_voxels
        self._grid_size = grid_size

    def generate(self, points):
        """Voxelize ``points`` via ``points_to_voxel``."""
        return points_to_voxel(points, self._voxel_size,
                               self._point_cloud_range,
                               self._max_num_points, True, self._max_voxels)

    @property
    def voxel_size(self):
        return self._voxel_size

    @property
    def max_num_points_per_voxel(self):
        return self._max_num_points

    @property
    def point_cloud_range(self):
        return self._point_cloud_range

    @property
    def grid_size(self):
        return self._grid_size
def rotate_nms_torch(rbboxes, scores, pre_max_size=None, post_max_size=None,
                     iou_threshold=0.5):
    """Rotated NMS with optional top-k pre/post filtering.

    Returns indices into the *original* ``rbboxes`` (or None if nothing
    survives).
    """
    indices = None
    if pre_max_size is not None:
        # Keep only the highest-scoring boxes before running NMS.
        pre_max_size = min(scores.shape[0], pre_max_size)
        scores, indices = torch.topk(scores, k=pre_max_size)
        rbboxes = rbboxes[indices]
    if len(rbboxes) == 0:
        keep = torch.empty((0, ), dtype=torch.int64)
    else:
        keep = nms_gpu(rbboxes, scores, iou_threshold)[:post_max_size]
    if keep.shape[0] == 0:
        return None
    # Map back through the top-k selection when it was applied.
    return indices[keep] if indices is not None else keep
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce ``tensors`` in flattened buckets, averaging by world size.

    Buckets are capped at ``bucket_size_mb`` when positive; otherwise
    tensors are grouped by type, preserving order.
    """
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()
    for bucket in buckets:
        # Flatten, reduce once, then scatter the averaged values back.
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        for tensor, synced in zip(bucket,
                                  _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
def allreduce_grads(model, coalesce=True, bucket_size_mb=-1):
    """All-reduce (and average) gradients of all trainable parameters."""
    grads = [
        p.grad.data for p in model.parameters()
        if p.requires_grad and p.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for grad in grads:
            dist.all_reduce(grad.div_(world_size))
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook that all-reduces gradients before stepping."""

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        # Synchronize gradients across workers before clipping/stepping.
        allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
class CocoDataset(CustomDataset):
    """Detection dataset backed by a COCO-format annotation file."""

    def load_annotations(self, ann_file):
        """Load ``ann_file`` via the COCO API and return per-image info dicts.

        Also populates ``self.coco``, ``self.cat_ids``, ``self.cat2label``
        and ``self.img_ids``. Each returned dict gains a 'filename' key.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        # COCO category ids are sparse; map them to contiguous labels 1..N
        # (0 is reserved for background).
        self.cat2label = {cid: lbl for lbl, cid in enumerate(self.cat_ids, start=1)}
        self.img_ids = self.coco.getImgIds()
        img_infos = []
        for img_id in self.img_ids:
            info = self.coco.loadImgs([img_id])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos

    def get_ann_info(self, idx):
        """Return the parsed annotation dict for image index ``idx``."""
        img_id = self.img_infos[idx]['id']
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        return self._parse_ann_info(self.coco.loadAnns(ann_ids))

    def _filter_imgs(self, min_size=32):
        'Filter images too small or without ground truths.'
        annotated = {a['image_id'] for a in self.coco.anns.values()}
        valid_inds = []
        for idx, info in enumerate(self.img_infos):
            if self.img_ids[idx] not in annotated:
                continue
            if min(info['width'], info['height']) >= min_size:
                valid_inds.append(idx)
        return valid_inds

    def _parse_ann_info(self, ann_info, with_mask=True):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes,
            bboxes_ignore, labels and, when ``with_mask``, masks,
            mask_polys, poly_lens.
        """
        gt_bboxes, gt_labels, gt_bboxes_ignore = [], [], []
        if with_mask:
            gt_masks, gt_mask_polys, gt_poly_lens = [], [], []
        for ann in ann_info:
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Drop degenerate boxes (zero area or sub-pixel extent).
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
            if ann['iscrowd']:
                # Crowd regions are excluded from positive matching.
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
            if with_mask:
                gt_masks.append(self.coco.annToMask(ann))
                # Polygons with fewer than 3 points (6 coords) are degenerate.
                polys = [p for p in ann['segmentation'] if len(p) >= 6]
                gt_mask_polys.append(polys)
                gt_poly_lens.extend(len(p) for p in polys)
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels,
                   bboxes_ignore=gt_bboxes_ignore)
        if with_mask:
            ann['masks'] = gt_masks
            ann['mask_polys'] = gt_mask_polys
            ann['poly_lens'] = gt_poly_lens
        return ann
class ConcatDataset(_ConcatDataset):
    """Same as torch.utils.data.dataset.ConcatDataset, but also
    concatenates the group flag for image aspect ratio.
    """

    def __init__(self, datasets):
        """flag: images with aspect ratio greater than 1 are group 1,
        otherwise group 0.
        """
        super(ConcatDataset, self).__init__(datasets)
        # Only the first dataset is probed for 'flag'; assumes all
        # constituent datasets either carry it or none do.
        if hasattr(datasets[0], 'flag'):
            self.flag = np.concatenate([ds.flag for ds in datasets])
def build_dataloader(dataset, imgs_per_gpu, workers_per_gpu, num_gpus=1, dist=True, **kwargs):
    """Build a DataLoader with aspect-ratio-group-aware sampling.

    Args:
        dataset: Dataset exposing a per-image ``flag`` attribute.
        imgs_per_gpu (int): Samples per GPU (and per batch in dist mode).
        workers_per_gpu (int): Loader worker processes per GPU.
        num_gpus (int): GPU count; only used in non-distributed mode.
        dist (bool): Use distributed sampling.
        **kwargs: Forwarded to ``DataLoader``; 'shuffle' also controls
            whether a GroupSampler is used in non-distributed mode.

    Returns:
        DataLoader: Configured loader with the project's collate function.
    """
    if dist:
        rank, world_size = get_dist_info()
        sampler = DistributedGroupSampler(dataset, imgs_per_gpu, world_size, rank)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        wants_shuffle = kwargs.get('shuffle', True)
        sampler = GroupSampler(dataset, imgs_per_gpu) if wants_shuffle else None
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu
    # NOTE(review): 'shuffle' left inside kwargs is forwarded to DataLoader
    # alongside a sampler — presumably callers pass shuffle=False here;
    # confirm against call sites.
    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        **kwargs)
class GroupSampler(Sampler):
    """Sampler that keeps each GPU batch within one aspect-ratio group.

    Each group is padded (by repeating its own indices) to a multiple of
    ``samples_per_gpu``, then batches are shuffled as whole units.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        # Total yielded per epoch: every group rounded up to a full batch.
        self.num_samples = 0
        for size in self.group_sizes:
            self.num_samples += int(np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        chunks = []
        for group, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            members = np.where(self.flag == group)[0]
            assert len(members) == size
            np.random.shuffle(members)
            # Repeat leading indices so the group divides into whole batches.
            pad = int(np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu - len(members)
            chunks.append(np.concatenate([members, members[:pad]]))
        flat = np.concatenate(chunks)
        # Shuffle at batch granularity so batches stay group-pure.
        n_batches = len(flat) // self.samples_per_gpu
        order = np.random.permutation(range(n_batches))
        flat = np.concatenate([
            flat[b * self.samples_per_gpu:(b + 1) * self.samples_per_gpu]
            for b in order
        ])
        out = torch.from_numpy(flat).long()
        assert len(out) == self.num_samples
        return iter(out)

    def __len__(self):
        return self.num_samples
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    Batches are additionally kept within a single aspect-ratio group
    (``dataset.flag``).

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        samples_per_gpu: Batch size per replica; groups are padded to a
            multiple of ``samples_per_gpu * num_replicas``.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None):
        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # Per-replica sample count: each group padded so every replica
        # receives whole GPU batches.
        self.num_samples = 0
        for size in self.group_sizes:
            self.num_samples += int(
                math.ceil(size * 1.0 / self.samples_per_gpu / self.num_replicas)
            ) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # Seed on the epoch so every replica produces the same permutation.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = []
        for group, size in enumerate(self.group_sizes):
            if size <= 0:
                continue
            members = np.where(self.flag == group)[0]
            assert len(members) == size
            shuffled = members[list(torch.randperm(int(size), generator=g))].tolist()
            # Pad by repeating leading indices up to a whole number of
            # (samples_per_gpu * num_replicas) chunks.
            pad = int(
                math.ceil(size * 1.0 / self.samples_per_gpu / self.num_replicas)
            ) * self.samples_per_gpu * self.num_replicas - len(shuffled)
            shuffled += shuffled[:pad]
            indices += shuffled
        assert len(indices) == self.total_size
        # Shuffle whole GPU batches, then slice out this replica's share.
        batch_order = list(torch.randperm(len(indices) // self.samples_per_gpu, generator=g))
        indices = [
            indices[b * self.samples_per_gpu + j]
            for b in batch_order
            for j in range(self.samples_per_gpu)
        ]
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the training loop so each epoch reshuffles differently.
        self.epoch = epoch
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`; ``None`` is
    passed through unchanged.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        # Convert element-wise; nested sequences yield nested lists.
        return [to_tensor(item) for item in data]
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    if data is None:
        return data
    raise TypeError('type {} cannot be converted to tensor.'.format(type(data)))
def random_scale(img_scales, mode='range'):
    """Randomly select a scale from a list of scales or scale ranges.

    Args:
        img_scales (list[tuple]): Image scale or scale range.
        mode (str): "range" or "value".

    Returns:
        tuple: Sampled image scale.

    Raises:
        ValueError: If ``mode`` is invalid, or if more than two scales
            are given with a mode other than "value".
    """
    if mode not in ('range', 'value'):
        # Bug fix: with exactly two scales and an unknown mode, the old
        # code fell through without binding img_scale and died with an
        # UnboundLocalError. Fail fast with a clear message instead.
        raise ValueError('mode must be "range" or "value", but got {}'.format(mode))
    num_scales = len(img_scales)
    if num_scales == 1:
        # Fixed scale: nothing to sample.
        return img_scales[0]
    if num_scales == 2 and mode == 'range':
        # Sample long and short edges independently from the two ranges.
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(min(img_scale_long), max(img_scale_long) + 1)
        short_edge = np.random.randint(min(img_scale_short), max(img_scale_short) + 1)
        return (long_edge, short_edge)
    if mode != 'value':
        raise ValueError('Only "value" mode supports more than 2 image scales')
    # "value" mode: pick one of the given scales uniformly at random.
    return img_scales[np.random.randint(num_scales)]
def show_ann(coco, img, ann_info):
    """Display an image with its COCO annotations overlaid.

    Args:
        coco: COCO API instance used to render the annotations.
        img: Image array in BGR order (converted to RGB for display).
        ann_info (list[dict]): Annotation dicts for this image.
    """
    plt.imshow(mmcv.bgr2rgb(img))
    plt.axis('off')
    # Draw annotations on the current axes before showing the figure.
    coco.showAnns(ann_info)
    plt.show()  # may block until the window is closed, depending on backend