code
stringlengths
17
6.64M
def main():
    """Smoke-test entry point: build a KITTI Raw LMDB dataset and play it back.

    NOTE(review): `KittiRawLMDBDataset` is defined elsewhere in the project;
    flag semantics below are assumed from their names — confirm against the class.
    """
    # Load the 'benchmark2' train split at (w=640, h=192) with every optional
    # extra (support frames, depth hints, poses, augmentations, torch conversion,
    # timing logs) disabled.
    dataset = KittiRawLMDBDataset(split='benchmark2', mode='train', size=(640, 192),
                                  supp_idxs=None, use_depth=True, interpolate_depth=False,
                                  use_depth_hints=False, use_poses=False,
                                  use_strong_aug=False, as_torch=False, use_aug=False,
                                  log_timings=False)
    print(dataset)  # relies on the dataset's repr for a human-readable summary
    dataset.play(fps=100)  # interactive visual playback of the items
@register('syns_patches')
class SYNSPatchesDataset(BaseDataset):
    """SYNS Patches dataset based on SYNS panorama images/LiDAR.

    See each function for details.

    Attributes:
    :param mode: (str) Split mode to load. {val, test, all}
    :param size: (Sequence[int]) Target image training size as (w, h).
    :param use_depth: (bool) If `True`, load ground truth LiDAR depth maps.
    :param use_edges: (bool) If `True`, load ground truth depth edges.
    :param as_torch: (bool) If `True`, convert (x, y, meta) to torch.
    :param use_aug: (bool) If `True`, call 'self.augment' during __getitem__.
    :param log_time: (bool) If `True`, log time taken to load/augment each item.

    NOTE(review): `self.edges_dir` is hard-coded to 'edges' and is what actually
    gates edge loading/plotting below; `use_edges` only participates in the
    val/test guard. Confirm whether `use_edges` was meant to gate loading too.
    """

    def __init__(self, mode: str, size: tuple[(int, int)]=(640, 192), use_depth: bool=True, use_edges: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.mode = mode
        (self.w, self.h) = self.size = size
        self.use_depth = use_depth
        self.use_edges = use_edges
        self.edges_dir = 'edges'  # subdirectory name consumed by `load_edges`

        # Evaluation-only dataset: augmentations are never valid here.
        if self.use_aug:
            raise ValueError('SYNS Patches is a testing dataset, no augmentations should be applied.')

        # Ground truth is withheld on the held-out splits.
        if ((self.mode in {'val', 'test'}) and (self.use_depth or self.use_edges)):
            raise ValueError('Cannot use ground truth depth when loading the testing or validation split!')

        # Full resolution (w, h) of the underlying KITTI-style images.
        (self.w_full, self.h_full) = self.size_full = (1242, 376)
        (self.split_file, self.items) = self.parse_items()

        # Guard against callers passing (h, w) instead of (w, h).
        if (self.h > self.w):
            raise ValueError(f'Target image height={self.h} is greater than image width={self.w}. Did you pass these in the correct order? Expected (width, height).')

    def __len__(self) -> int:
        """Number of items in dataset."""
        return len(self.items)

    def parse_items(self) -> tuple[(Path, list[tuple[(str, str)]])]:
        """Return the split file path and the (scene, filename) items it lists."""
        return syp.load_split(self.mode)

    def load(self, item: int, x: dict, y: dict, m: dict) -> BatchData:
        """Load single item in dataset.

        NOTE: Items in each dict will be converted into `torch.Tensors` if `self.as_torch=True`.

        :param item: (int) Dataset item to load.
        :param x: {
            imgs: (ndarray) (h, w, 3) Target image for depth estimation.
        }
        :param y: {
            imgs: (ndarray) (h, w, 3) x['imgs'] (NO AUGMENTATIONS).
            K: (ndarray) Camera intrinsics parameters (see `syp.load_intrinsics`).
            depth: (Optional[ndarray]) (h, w, 1) Ground truth LiDAR depth map.
            edges: (Optional[ndarray]) (h, w, 1) Ground truth depth edges.
        }
        :param m: {
            cat: (str) Image category label.
            subcat: (str) Image subcategory label.
        }
        """
        d = self.items[item]
        (m['cat'], m['subcat']) = syp.load_category(d[0])
        with self.timer('Image'):
            (img, img_res) = self.load_image(d)
            x['imgs'] = io.pil2np(img_res)  # resized copy fed to the network
            y['imgs'] = io.pil2np(img)      # full-resolution copy for evaluation
        if self.use_depth:
            with self.timer('Depth'):
                y['depth'] = self.load_depth(d)
        if self.edges_dir:
            with self.timer('Edges'):
                edges = self.load_edges(d)
                # Add a trailing channel axis and binarize.
                y['edges'] = io.pil2np(edges)[(..., None)].astype(bool)
        y['K'] = syp.load_intrinsics()
        return (x, y, m)

    def load_image(self, data: tuple[(str, str)]) -> tuple[(Image, Image)]:
        """Load a single image at full size and resized to `self.size`.

        :param data: (str, str) Data representing the item's scene and file number.
        :return: (Image, Image) Full-resolution and (self.w, self.h) resized PIL images.
        """
        file = syp.get_image_file(*data)
        img = Image.open(file)
        img_res = img.resize(self.size, resample=Image.BILINEAR)
        return (img, img_res)

    def load_depth(self, data: tuple[(str, str)]) -> np.ndarray:
        """Load a single depth map (stored as '.npy').

        :param data: (str, str) Data representing the item's scene and file number.
        :return: (ndarray) Loaded numpy depth map at stored resolution.
        """
        file = syp.get_depth_file(*data)
        depth = np.load(file)
        return depth

    def load_edges(self, data: tuple[(str, str)]) -> Image:
        """Load a single depth edge map.

        :param data: (str, str) Data representing the item's scene and file number.
        :return: (Image) Loaded PIL edge map at stored resolution.
        """
        file = syp.get_edges_file(data[0], self.edges_dir, data[1])
        edges = Image.open(file)
        return edges

    def transform(self, x: dict, y: dict, m: dict) -> BatchData:
        """Apply ImageNet standarization to the images processed by the network `x`."""
        x['imgs'] = ops.standardize(x['imgs'])
        return (x, y, m)

    def create_axs(self) -> Axes:
        """Create the axis structure required for plotting.

        One row for the image, plus one per enabled ground-truth signal.
        NOTE(review): `self.edges_dir is not None` is always True since the
        attribute is hard-coded to 'edges' — confirm intended behavior.
        """
        (_, axs) = plt.subplots(((1 + self.use_depth) + (self.edges_dir is not None)))
        if isinstance(axs, plt.Axes):
            axs = np.array([axs])  # normalize the single-subplot case to an array
        plt.tight_layout()
        return axs

    def show(self, x: dict, y: dict, m: dict, axs: Optional[Axes]=None) -> None:
        """Show a single dataset item (image, optional depth, optional edges)."""
        axs = (self.create_axs() if (axs is None) else axs)
        axs[0].imshow(y['imgs'])
        if self.use_depth:
            axs[1].imshow(viz.rgb_from_disp(y['depth'], invert=True))
        if self.edges_dir:
            axs[(- 1)].imshow(y['edges'])
def get_split_file(mode: str) -> Path:
    """Path to the split listing file for the given split mode."""
    return PATHS['syns_patches'] / 'splits' / f'{mode}_files.txt'
def get_scenes() -> list[Path]:
    """Sorted paths to every scene directory (everything except 'splits')."""
    root = PATHS['syns_patches']
    scenes = [p for p in root.iterdir() if p.is_dir() and p.stem != 'splits']
    return sorted(scenes)
def get_scene_files(scene_dir: Path) -> dict[(str, Sequence[Path])]:
    """Map each known subdir name to its sorted file list, skipping missing subdirs."""
    files = {}
    for key in SUBDIRS:
        subdir = scene_dir / key
        if subdir.is_dir():
            files[key] = sorted(subdir.iterdir())
    return files
def get_info_file(scene: str) -> Path:
    """First '.txt' file found inside the scene directory."""
    scene_dir = PATHS['syns_patches'] / scene
    return next(f for f in scene_dir.iterdir() if f.suffix == '.txt')
def get_image_file(scene: str, file: str) -> Path:
    """Path to an image, given the scene and item filename."""
    return PATHS['syns_patches'] / scene / 'images' / file
def get_depth_file(scene: str, file: str) -> Path:
    """Path to a '.npy' depth map, given the scene and item filename."""
    path = PATHS['syns_patches'] / scene / 'depths' / file
    return path.with_suffix('.npy')
def get_edges_file(scene: str, subdir: str, file: str) -> Path:
    """Path to a depth-edge map, given the scene, edges subdir, and item filename.

    :param scene: (str) Scene number/name.
    :param subdir: (str) Edges subdirectory; must contain 'edges' and exist in SUBDIRS.
    :param file: (str) Item filename.
    :raises ValueError: if `subdir` is not a valid edges directory.
    """
    # Validate with explicit raises instead of `assert` — asserts are stripped
    # under `python -O`, which would silently skip this input validation.
    if 'edges' not in subdir:
        raise ValueError(f'Must provide an "edges" directory. ({subdir})')
    if subdir not in SUBDIRS:
        raise ValueError(f"Non-existent edges directory. ({subdir} vs. {[s for s in SUBDIRS if ('edges' in s)]})")
    return PATHS['syns_patches'] / scene / subdir / file
def load_info(scene: str) -> Sequence[str]:
    """Read the scene information file as a list of lines (latin-1 encoded)."""
    info_file = get_info_file(scene)
    return readlines(info_file, encoding='latin-1')
def load_category(scene: str) -> tuple[(str, str)]:
    """Parse the scene (category, subcategory) from its info file.

    Line 1 looks like 'Scene Category: <cat>: <subcat>', with an older
    variant using '<cat> - <subcat>' as the separator.
    """
    label = load_info(scene)[1].replace('Scene Category: ', '')
    try:
        cat, subcat = label.split(': ')
    except ValueError:  # fall back to the dash-separated variant
        cat, subcat = label.split(' - ')
    return (cat, subcat)
def load_split(mode) -> tuple[(Path, list[list[str]])]:
    """Load the list of scenes and filenames that are part of the given split.

    The split file is given as "SEQ ITEM":
    ```
    01 00.png
    10 11.png
    ```
    """
    file = get_split_file(mode)
    items = [line.split(' ') for line in readlines(file)]
    return (file, items)
def load_intrinsics() -> NDArray:
    """Computes the virtual camera intrinsics for the `Kitti` based SYNS Patches.

    We compute this based on the desired FOV, using basic trigonometry:
    the focal length is the principal point divided by tan(FOV / 2).

    :return: (ndarray) (3, 3) Camera intrinsic parameters.
    """
    (Fy, Fx) = KITTI_FOV  # vertical/horizontal field of view in degrees
    (h, w) = KITTI_SHAPE
    # Principal point at the image center (integer division).
    (cx, cy) = ((w // 2), (h // 2))
    fx = (cx / np.tan((np.deg2rad(Fx) / 2)))
    fy = (cy / np.tan((np.deg2rad(Fy) / 2)))
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32)
    return K
def main():
    """Preview every SYNS scene: its images and depth maps in two 18-cell grids."""
    import matplotlib.pyplot as plt
    for scene in get_scenes():
        print(scene.stem)
        # Four 3x3 figures: two for images, two for depth maps.
        grids = []
        for _ in range(4):
            _, grid = plt.subplots(3, 3)
            plt.tight_layout()
            grids.append(grid)
        axs1 = np.concatenate((grids[0].flatten(), grids[1].flatten()))
        axs2 = np.concatenate((grids[2].flatten(), grids[3].flatten()))
        images = sorted((scene / 'images').iterdir())
        depths = sorted((scene / 'depth_images').iterdir())
        for ax in axs1:
            ax.cla()
        for ax in axs2:
            ax.cla()
        for ax, f in zip(axs1, images):
            ax.imshow(Image.open(f))
        for ax, f in zip(axs2, depths):
            ax.imshow(Image.open(f))
        for i, ax in enumerate(axs1):
            ax.set_title(i)
        plt.show()
class ChamferDistanceFunction(torch.autograd.Function):
    """Autograd bridge to the compiled `cd` Chamfer-distance extension.

    NOTE(review): `cd` is an external compiled module (CPU `forward`/`backward`
    and CUDA `forward_cuda`/`backward_cuda` kernels) imported elsewhere in this
    file; its exact contract is assumed from the call sites — confirm there.
    """

    @staticmethod
    def forward(ctx, xyz1, xyz2):
        # Point clouds: xyz1 (b, n, _), xyz2 (b, m, _) — shapes inferred from
        # the unpacking below; presumably (_ == 3) coordinates, TODO confirm.
        (batchsize, n, _) = xyz1.size()
        (_, m, _) = xyz2.size()
        # The kernels require contiguous memory.
        xyz1 = xyz1.contiguous()
        xyz2 = xyz2.contiguous()
        # Output buffers filled in-place by the extension: per-point distances
        # and nearest-neighbour indices in the opposite cloud.
        dist1 = torch.zeros(batchsize, n)
        dist2 = torch.zeros(batchsize, m)
        idx1 = torch.zeros(batchsize, n, dtype=torch.int)
        idx2 = torch.zeros(batchsize, m, dtype=torch.int)
        if (not xyz1.is_cuda):
            cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        else:
            # CUDA path: buffers must live on the GPU before the kernel writes them.
            dist1 = dist1.cuda()
            dist2 = dist2.cuda()
            idx1 = idx1.cuda()
            idx2 = idx2.cuda()
            cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
        # Save inputs + indices for the backward pass.
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return (dist1, dist2)

    @staticmethod
    def backward(ctx, graddist1, graddist2):
        (xyz1, xyz2, idx1, idx2) = ctx.saved_tensors
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()
        # Gradient buffers filled in-place by the extension.
        gradxyz1 = torch.zeros(xyz1.size())
        gradxyz2 = torch.zeros(xyz2.size())
        if (not graddist1.is_cuda):
            cd.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        else:
            gradxyz1 = gradxyz1.cuda()
            gradxyz2 = gradxyz2.cuda()
            cd.backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        return (gradxyz1, gradxyz2)
class ChamferDistance(torch.nn.Module):
    """Module wrapper computing the Chamfer distance between two point clouds.

    Delegates to `ChamferDistanceFunction`, which requires the compiled `cd`
    extension module.

    :raises RuntimeError: on construction, if the `cd` extension is unavailable.
    """

    def __init__(self):
        super().__init__()
        # Fail fast if the compiled extension could not be imported.
        # (Fixed: the message was a pointless f-string with no placeholders.)
        if cd is None:
            raise RuntimeError('Chamfer Distance module unavailable')

    def forward(self, xyz1, xyz2):
        """Return the two directed distance tensors from the autograd function."""
        return ChamferDistanceFunction.apply(xyz1, xyz2)
class Database():
    """Base class for LMDB-backed read-only key/value databases.

    Values and (by default) keys are pickled. The environment is opened
    lazily via the `database` property; see `_keep_database` for the
    close-after-metadata behavior that supports multi-process loading.
    """

    # Lazily-populated caches (class-level defaults, set per-instance on use).
    _database = None   # open lmdb.Environment, or None when closed
    _protocol = None   # pickle protocol stored in the DB under 'protocol'
    _length = None     # cached len(self.keys)

    def __init__(self, path: PathLike, readahead: bool=True, pre_open: bool=False):
        """Base class for LMDB-backed databases.

        :param path: (PathLike) Path to the database.
        :param readahead: (bool) If `True`, enables the filesystem readahead mechanism.
        :param pre_open: (bool) If `True`, the first iterations will be faster, but it
            will raise an error when doing multi-gpu training. If `False`, the database
            will open when you retrieve the first item.
        """
        self.path = str(path)
        self.readahead = readahead
        self.pre_open = pre_open
        self._has_fetched_an_item = False

    @property
    def database(self):
        # Open lazily so a fork (e.g. DataLoader workers) does not inherit an
        # open LMDB handle from the parent process.
        if (self._database is None):
            self._database = lmdb.open(path=self.path, readonly=True, readahead=self.readahead, max_spare_txns=256, lock=False)
        return self._database

    @database.deleter
    def database(self):
        """Close the environment and reset the cache so it can be reopened."""
        if (self._database is not None):
            self._database.close()
            self._database = None

    @property
    def protocol(self):
        """Read the pickle protocol contained in the database.

        Stored under the ASCII key 'protocol'; cached after the first read.
        """
        if (self._protocol is None):
            self._protocol = self._get(item='protocol', convert_key=(lambda key: key.encode('ascii')), convert_value=(lambda value: pickle.loads(value)))
        return self._protocol

    @property
    def keys(self):
        """Read the set of available keys contained in the database."""
        protocol = self.protocol
        keys = self._get(item='keys', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value)))
        return keys

    def __len__(self):
        """Return the number of keys available in the database (cached)."""
        if (self._length is None):
            self._length = len(self.keys)
        return self._length

    def __getitem__(self, item):
        """Retrieve an item (single key) or a list of items (list of keys)."""
        # From here on, `_keep_database` will leave the environment open.
        self._has_fetched_an_item = True
        if (not isinstance(item, list)):
            item = self._get(item, self._convert_key, self._convert_value)
        else:
            item = self._gets(item, self._convert_keys, self._convert_values)
        return item

    def __contains__(self, item):
        """Check if a given key is in the database."""
        return (item in self.keys)

    def index(self, index):
        """Retrieve (key, value) for an integer index into `self.keys`."""
        key = self.keys[index]
        return (key, self[key])

    def _get(self, item, convert_key, convert_value):
        """Fetch one item inside a fresh read transaction/cursor."""
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                item = self._fetch(cursor, item, convert_key, convert_value)
        self._keep_database()
        return item

    def _gets(self, items, convert_keys, convert_values):
        """Fetch a list of items inside a fresh read transaction/cursor."""
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                items = self._fetchs(cursor, items, convert_keys, convert_values)
        self._keep_database()
        return items

    def _fetch(self, cursor, key, convert_key, convert_value):
        """Retrieve and decode a single value given a key."""
        key = convert_key(key=key)
        value = cursor.get(key=key)
        value = convert_value(value=value)
        return value

    def _fetchs(self, cursor, keys, convert_keys, convert_values):
        """Retrieve and decode a list of values via a bulk `getmulti`."""
        keys = convert_keys(keys=keys)
        (_, values) = list(zip(*cursor.getmulti(keys)))
        values = convert_values(values=values)
        return values

    def _convert_key(self, key):
        """Convert a key into a byte key using the stored pickle protocol."""
        return pickle.dumps(key, protocol=self.protocol)

    def _convert_keys(self, keys):
        """Convert keys into byte keys."""
        return [self._convert_key(key=key) for key in keys]

    def _convert_value(self, value):
        """Convert a byte value back into a value (subclasses override this)."""
        return pickle.loads(value)

    def _convert_values(self, values):
        """Convert byte values back into values."""
        return [self._convert_value(value=value) for value in values]

    def _keep_database(self):
        """Close the database again unless pre-opened or an item was fetched.

        Keeps the environment closed during metadata-only access (protocol,
        keys) — presumably so forked workers open their own handle; confirm.
        """
        if ((not self.pre_open) and (not self._has_fetched_an_item)):
            del self.database

    def __iter__(self):
        """Provide an iterator over the keys when iterating over the database."""
        return iter(self.keys)

    def __del__(self):
        """Close the database properly on garbage collection."""
        del self.database
class ImageDatabase(Database):
    """LMDB database whose stored values are encoded images, decoded to PIL on read."""

    def _convert_value(self, value):
        """Decode raw image bytes into a PIL Image."""
        buffer = io.BytesIO(value)
        return Image.open(buffer)
class MaskDatabase(ImageDatabase):
    """ImageDatabase variant that decodes values to 1-bit (binary mask) PIL images."""

    def _convert_value(self, value):
        """Decode raw image bytes and binarize to PIL mode '1'."""
        img = Image.open(io.BytesIO(value))
        return img.convert('1')
class LabelDatabase(Database):
    """Database of arbitrary pickled labels; inherits the default pickle codecs."""
    pass
class ArrayDatabase(Database):
    """Database storing raw ndarray buffers plus shared 'dtype'/'shape' metadata."""

    _dtype = None  # cached numpy dtype read from the DB
    _shape = None  # cached per-item shape read from the DB

    def _load_meta(self, item):
        """Fetch a pickled metadata entry ('dtype' or 'shape') from the database."""
        protocol = self.protocol
        return self._get(item=item,
                         convert_key=(lambda key: pickle.dumps(key, protocol=protocol)),
                         convert_value=(lambda value: pickle.loads(value)))

    @property
    def dtype(self):
        if self._dtype is None:
            self._dtype = self._load_meta('dtype')
        return self._dtype

    @property
    def shape(self):
        if self._shape is None:
            self._shape = self._load_meta('shape')
        return self._shape

    def _convert_value(self, value):
        """Reinterpret one raw byte buffer as an array of the stored dtype/shape."""
        return np.frombuffer(value, dtype=self.dtype).reshape(self.shape)

    def _convert_values(self, values):
        """Concatenate raw buffers and reshape to (len(values), *shape)."""
        data = b''.join(values)
        return np.frombuffer(data, dtype=self.dtype).reshape((len(values),) + self.shape)
class TensorDatabase(ArrayDatabase):
    """ArrayDatabase whose decoded arrays are converted to torch tensors."""

    def _convert_value(self, value):
        """Decode a single buffer and wrap it as a (zero-copy) torch tensor."""
        arr = super()._convert_value(value)
        return torch.from_numpy(arr)

    def _convert_values(self, values):
        """Decode a batch of buffers and wrap them as one torch tensor."""
        arr = super()._convert_values(values)
        return torch.from_numpy(arr)
def write_image_database(d: dict, database: Path):
    """Write a {key: image-file-path} mapping into an LMDB image database.

    Image files are stored as their raw encoded bytes (decoded later by
    `ImageDatabase`). The database is built in a temporary directory and then
    moved into place, so a crash never leaves a half-written database.

    :param d: (dict) Mapping from pickleable keys to image file `Path`s.
    :param database: (Path) Target database directory (replaced if it exists).
    """
    import tempfile
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)
    # mkdtemp gives a unique, race-free scratch directory (the previous
    # time()-named path under /tmp could collide across concurrent runs).
    tmp_dir = Path(tempfile.mkdtemp())
    tmp_database = tmp_dir / f'{database.name}'
    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env:
        # Metadata: pickle protocol (ASCII key) and the sorted key listing.
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        # One transaction per image keeps memory bounded for large datasets.
        for (key, value) in tqdm(sorted(d.items())):
            with env.begin(write=True) as txn:
                with value.open('rb') as file:
                    txn.put(key=pickle.dumps(key), value=file.read(), dupdata=False)
    shutil.move(f'{tmp_database}', database)
    shutil.rmtree(tmp_dir)
def write_label_database(d: dict, database: Path):
    """Write a {key: label} mapping into an LMDB label database.

    Labels are pickled (decoded later by `LabelDatabase`). The database is
    built in a temporary directory and moved into place, so a crash never
    leaves a half-written database at the target path.

    :param d: (dict) Mapping from pickleable keys to pickleable labels.
    :param database: (Path) Target database directory (replaced if it exists).
    """
    import tempfile
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)
    # mkdtemp gives a unique, race-free scratch directory (the previous
    # time()-named path under /tmp could collide across concurrent runs).
    tmp_dir = Path(tempfile.mkdtemp())
    tmp_database = tmp_dir / f'{database.name}'
    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env:
        # Metadata: pickle protocol (ASCII key) and the sorted key listing.
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        with env.begin(write=True) as txn:
            for (key, value) in tqdm(sorted(d.items())):
                txn.put(key=pickle.dumps(key), value=pickle.dumps(value), dupdata=False)
    shutil.move(f'{tmp_database}', database)
    shutil.rmtree(tmp_dir)
def write_array_database(d: dict, database: Path):
    """Write a {key: ndarray} mapping into an LMDB array database.

    Arrays are stored as raw buffer bytes so `ArrayDatabase._convert_value`
    can decode them with `np.frombuffer` (the previous implementation pickled
    the arrays, which `frombuffer` cannot read back). All arrays are assumed
    to share the dtype/shape of the first value, which is stored as metadata.

    :param d: (dict) Mapping from pickleable keys to same-shape/dtype ndarrays.
    :param database: (Path) Target database directory (replaced if it exists).
    """
    import tempfile
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)
    # mkdtemp gives a unique, race-free scratch directory (the previous
    # time()-named path under /tmp could collide across concurrent runs).
    tmp_dir = Path(tempfile.mkdtemp())
    tmp_database = tmp_dir / f'{database.name}'
    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env:
        # Metadata: pickle protocol (ASCII key) and the sorted key listing.
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        # Shared dtype/shape metadata, taken from the first value.
        value = next(iter(d.values()))
        shape = value.shape
        dtype = value.dtype
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('shape'), value=pickle.dumps(shape), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('dtype'), value=pickle.dumps(dtype), dupdata=False)
        with env.begin(write=True) as txn:
            for (key, value) in tqdm(sorted(d.items())):
                # Store raw C-contiguous bytes, NOT a pickle, to match the
                # `np.frombuffer` reader in `ArrayDatabase`.
                raw = np.ascontiguousarray(value).tobytes()
                txn.put(key=pickle.dumps(key), value=raw, dupdata=False)
    shutil.move(f'{tmp_database}', database)
    shutil.rmtree(tmp_dir)
class DenseL1Error(nn.Module):
    """Dense L1 loss, averaged across the channel dimension."""

    def forward(self, pred, target):
        """Return the (b, 1, h, w) per-pixel mean absolute error over channels."""
        diff = torch.abs(pred - target)
        return diff.mean(dim=1, keepdim=True)
class DenseL2Error(nn.Module):
    """Dense L2 (Euclidean) distance across channels, clamped for numerical stability."""

    def forward(self, pred, target):
        """Return the (b, 1, h, w) per-pixel channel-wise Euclidean distance."""
        squared = (pred - target) ** 2
        total = squared.sum(dim=1, keepdim=True)
        # Clamp before sqrt so the gradient stays finite at zero error.
        return total.clamp(min=ops.eps(pred)).sqrt()
class SSIMError(nn.Module):
    """Structural similarity (SSIM) error between two images.

    Uses 3x3 average pooling over reflection-padded inputs for the local
    statistics, and maps similarity into an error in [0, 1].
    """

    def __init__(self):
        super().__init__()
        self.pool: nn.Module = nn.AvgPool2d(kernel_size=3, stride=1)
        self.refl: nn.Module = nn.ReflectionPad2d(padding=1)
        # Standard SSIM stabilizers (C1, C2) for dynamic range 1.
        self.eps1: float = (0.01 ** 2)
        self.eps2: float = (0.03 ** 2)

    def forward(self, pred: Tensor, target: Tensor) -> Tensor:
        """Compute the structural similarity error between two images.

        :param pred: (Tensor) (b, c, h, w) Predicted reconstructed images.
        :param target: (Tensor) (b, c, h, w) Target images to reconstruct.
        :return: (Tensor) (b, c, h, w) Structural similarity error in [0, 1].
        """
        a = self.refl(pred)
        b = self.refl(target)
        # Local means, variances and covariance via average pooling.
        mu_a = self.pool(a)
        mu_b = self.pool(b)
        var_a = self.pool(a ** 2) - (mu_a ** 2)
        var_b = self.pool(b ** 2) - (mu_b ** 2)
        cov = self.pool(a * b) - (mu_a * mu_b)
        num = ((2 * mu_a * mu_b) + self.eps1) * ((2 * cov) + self.eps2)
        den = ((mu_a ** 2) + (mu_b ** 2) + self.eps1) * (var_a + var_b + self.eps2)
        # Map similarity [-1, 1] -> error [0, 1].
        return ((1 - (num / den)) / 2).clamp(min=0, max=1)
class PhotoError(nn.Module):
    """Photometric error as a weighted blend of SSIM and L1.
    From Monodepth (https://arxiv.org/abs/1609.03677)

    Setting `weight_ssim=0` disables SSIM; `weight_ssim=1` disables L1;
    anything in between combines both.

    Attributes:
    :param weight_ssim: (float) Contribution of the SSIM term in [0, 1];
        the L1 term gets `1 - weight_ssim`.
    """

    def __init__(self, weight_ssim: float=0.85):
        super().__init__()
        if (weight_ssim < 0) or (weight_ssim > 1):
            raise ValueError(f'Invalid SSIM weight. ({weight_ssim} vs. [0, 1])')
        self.weight_ssim: float = weight_ssim
        self.weight_l1: float = 1 - self.weight_ssim
        # Only instantiate the terms that actually contribute.
        self.ssim: Optional[nn.Module] = SSIMError() if self.weight_ssim > 0 else None
        self.l1: Optional[nn.Module] = DenseL1Error() if self.weight_l1 > 0 else None

    def forward(self, pred: Tensor, target: Tensor) -> Tensor:
        """Compute the photometric error between two images.

        :param pred: (Tensor) (b, c, h, w) Predicted reconstructed images.
        :param target: (Tensor) (b, c, h, w) Target images to reconstruct.
        :return: (Tensor) (b, 1, h, w) Photometric error.
        """
        b, _, h, w = pred.shape
        loss = pred.new_zeros((b, 1, h, w))
        if self.ssim:
            ssim_err = self.ssim(pred, target).mean(dim=1, keepdim=True)
            loss = loss + (self.weight_ssim * ssim_err)
        if self.l1:
            loss = loss + (self.weight_l1 * self.l1(pred, target))
        return loss
@register(('img_recon', 'feat_recon', 'autoenc_recon'))
class ReconstructionLoss(nn.Module):
    """Class to compute the reconstruction loss when synthesising new views.

    Contributions:
    - Min reconstruction error: From Monodepth2 (https://arxiv.org/abs/1806.01260)
    - Static pixel automasking: From Monodepth2 (https://arxiv.org/abs/1806.01260)
    - Explainability mask: From SfM-Learner (https://arxiv.org/abs/1704.07813)
    - Uncertainty mask: From Klodt (https://openaccess.thecvf.com/content_ECCV_2018/papers/Maria_Klodt_Supervising_the_new_ECCV_2018_paper.pdf)

    :param loss_name: (str) Loss type to use. {ssim, l1, l2}
    :param use_min: (bool) If `True`, take the final loss as the minimum across all available views.
    :param use_automask: (bool) If `True`, mask pixels where the original support image has a lower loss than the warped counterpart.
    :param mask_name: (Optional[str]) Weighting mask used. {'explainability', 'uncertainty', None}
    """

    def __init__(self, loss_name: str='ssim', use_min: bool=False, use_automask: bool=False, mask_name: Optional[str]=None):
        super().__init__()
        self.loss_name = loss_name
        self.use_min = use_min
        self.use_automask = use_automask
        self.mask_name = mask_name
        if (self.mask_name not in {'explainability', 'uncertainty', None}):
            raise ValueError(f'Invalid mask type: {self.mask_name}')
        # Dense photometric criterion selected by name (KeyError on bad name).
        self._photo = {'ssim': PhotoError(weight_ssim=0.85), 'l1': DenseL1Error(), 'l2': DenseL2Error()}[self.loss_name]

    def apply_mask(self, err: Tensor, mask: Optional[Tensor]=None) -> Tensor:
        """Apply a weighting mask to a photometric loss error.

        :param err: (Tensor) (b, n, h, w) Photometric error to mask.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask to apply.
        :return: (Tensor) (b, n, h, w) The weighted photometric error.
        """
        if (self.mask_name and (mask is None)):
            raise ValueError('Must provide a "mask" when masking...')
        if (self.mask_name == 'explainability'):
            # Simple per-pixel confidence weighting (in-place).
            err *= mask
        elif (self.mask_name == 'uncertainty'):
            # Laplacian-style uncertainty: down-weight by exp(-mask), add mask
            # as a regularizer so the network cannot inflate uncertainty freely.
            err = ((err * (- mask).exp()) + mask)
        return err

    def apply_automask(self, err: Tensor, source: Tensor, target: Tensor, mask: Optional[Tensor]=None) -> tuple[(Tensor, Tensor)]:
        """Compute and apply an automask based on the identity reconstruction error.

        :param err: (Tensor) (b, 1, h, w) Photometric error between target and warped support frames.
        :param source: (Tensor) (*n, b, 3, h, w) Original (unwarped) support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (
            err: (Tensor) (b, 1, h, w) The automasked photometric error.
            automask: (Tensor) (b, 1, h, w) Boolean mask indicating pixels NOT removed by the automasking procedure.
        )
        """
        err_static = self.compute_photo(source, target, mask=mask)
        # Tiny random noise breaks exact ties so `min` doesn't systematically
        # prefer the static error (Monodepth2 trick).
        err_static += (ops.eps(err_static) * torch.randn_like(err_static))
        err = torch.cat((err, err_static), dim=1)
        (err, idxs) = torch.min(err, dim=1, keepdim=True)
        # idxs == 0 means the warped error won, i.e. the pixel is kept.
        automask = (idxs == 0)
        return (err, automask)

    def compute_photo(self, pred: Tensor, target: Tensor, mask: Optional[Tensor]=None) -> Tensor:
        """Compute the dense photometric error between multiple predictions and a single target.

        :param pred: (Tensor) (*n, b, 3, h, w) Synthesized warped support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (Tensor) (b, 1, h, w) The reduced photometric error.
        """
        if (pred.ndim == 4):
            # Single prediction: direct dense error.
            err = self._photo(pred, target)
        else:
            # Multiple predictions: broadcast the target, fold the view axis
            # into the batch for the criterion, then restore (b, n, h, w).
            target = target[None].expand_as(pred)
            err = self._photo(pred.flatten(0, 1), target.flatten(0, 1))
            err = err.squeeze(1).unflatten(0, pred.shape[:2]).permute(1, 0, 2, 3)
        err = self.apply_mask(err, mask)
        # Reduce across views: per-pixel min (Monodepth2) or mean.
        err = (err.min(dim=1, keepdim=True)[0] if self.use_min else err.mean(dim=1, keepdim=True))
        return err

    def forward(self, pred: Tensor, target: Tensor, source: Optional[Tensor]=None, mask: Optional[Tensor]=None) -> LossData:
        """Compute the reconstruction loss between two images.

        :param pred: (Tensor) (*n, b, 3, h, w) Synthesized warped support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param source: (Optional[Tensor]) (*n, b, 3, h, w) Original support images (required when automasking).
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (
            loss: (Tensor) (,) Scalar loss.
            loss_dict: {
                (Optional) (If using automasking)
                automask: (Tensor) (b, 1, h, w) Boolean mask indicating pixels NOT removed by the automasking procedure.
            }
        )
        """
        ld = {}
        err = self.compute_photo(pred, target, mask)
        if self.use_automask:
            if (source is None):
                raise ValueError('Must provide the original "source" images when automasking...')
            (err, automask) = self.apply_automask(err, source, target, mask)
            ld['automask'] = automask
        loss = err.mean()
        return (loss, ld)
def l1_loss(pred: Tensor, target: Tensor) -> Tensor:
    """Dense elementwise L1 (absolute) error."""
    return torch.abs(pred - target)
def log_l1_loss(pred: Tensor, target: Tensor) -> Tensor:
    """Dense Log-L1 error: log(1 + |pred - target|)."""
    abs_err = (pred - target).abs()
    return (abs_err + 1).log()
def berhu_loss(pred: Tensor, target: Tensor, delta: float=0.2, dynamic: bool=True) -> Tensor:
    """Dense berHu (reverse Huber) loss: L1 below the threshold, quadratic above.

    :param pred: (Tensor) Network prediction.
    :param target: (Tensor) Ground-truth target.
    :param delta: (float) Threshold above which the loss switches from L1.
    :param dynamic: (bool) If `True`, set the threshold dynamically, using `delta` as the max error percentage.
    :return: (Tensor) The computed `berhu` loss.
    """
    diff = (pred - target).abs()
    if dynamic:
        # Threshold as a fraction of the current maximum error (a 0-dim tensor).
        delta = delta * diff.max()
    # BUG FIX: use `** 2` instead of `.pow(2)` so the static case works too —
    # a plain Python float has no `.pow` method, so `dynamic=False` crashed.
    diff_delta = (diff ** 2 + delta ** 2) / ((2 * delta) + ops.eps(pred))
    return torch.where(diff <= delta, diff, diff_delta)
@register(('depth_regr', 'stereo_const'))
class RegressionLoss(nn.Module):
    """Supervised regression loss over dense predictions.

    NOTE: The DepthHints automask is not computed here. Instead, we rely on the
    `MonoDepthModule` to compute it. Probably not the best way of doing it, but
    it keeps this loss clean...

    Contributions:
    - Virtual stereo consistency: From Monodepth (https://arxiv.org/abs/1609.03677)
    - Proxy berHu regression: From Kuznietsov (https://arxiv.org/abs/1702.02706)
    - Proxy LogL1 regression: From Depth Hints (https://arxiv.org/abs/1909.09051)
    - Proxy loss automasking: From Depth Hints/Monodepth2 (https://arxiv.org/abs/1909.09051)

    :param loss_name: (str) Loss type to use. {l1, log_l1, berhu}
    :param use_automask: (bool) If `True`, use DepthHints automask based on the pred/hints errors.
    """

    def __init__(self, loss_name: str='berhu', use_automask: bool=False):
        super().__init__()
        self.loss_name = loss_name
        self.use_automask = use_automask
        criteria = {'l1': l1_loss, 'log_l1': log_l1_loss, 'berhu': berhu_loss}
        self.criterion = criteria[self.loss_name]

    def forward(self, pred: Tensor, target: Tensor, mask: Optional[Tensor]=None) -> LossData:
        """Return the mask-weighted mean error plus the dense error/mask for logging."""
        if mask is None:
            mask = torch.ones_like(target)
        err = mask * self.criterion(pred, target)
        loss = err.sum() / mask.sum()
        return loss, {'err_regr': err, 'mask_regr': mask}
@register('autoencoder')
class AutoencoderNet(nn.Module):
    """Image autoencoder network.
    From FeatDepth (https://arxiv.org/abs/2007.10603)

    Heavily based on the Depth network with some changes:
    - Single decoder
    - Produces 3 sigmoid channels (RGB)
    - No skip connections, it's an autoencoder!

    :param enc_name: (str) `timm` encoder key (check `timm.list_models()`).
    :param pretrained: (bool) If `True`, returns an encoder pretrained on ImageNet.
    :param dec_name: (str) Custom decoder type to use.
    :param out_scales: (Sequence[int]) List of multi-scale output downsampling factor as `2**s`.
    """

    def __init__(self, enc_name: str='resnet18', pretrained: bool=True, dec_name: str='monodepth', out_scales: Union[(int, Sequence[int])]=(0, 1, 2, 3)):
        super().__init__()
        self.enc_name = enc_name
        self.pretrained = pretrained
        self.dec_name = dec_name
        # Normalize a single int scale into a sequence.
        self.out_scales = ([out_scales] if isinstance(out_scales, int) else out_scales)
        if (self.dec_name not in DECODERS):
            # Fixed: the message f-string was missing its closing parenthesis.
            raise KeyError(f'Invalid decoder key. ({self.dec_name} vs. {DECODERS.keys()})')
        # Feature-pyramid encoder from timm; record its channels and strides.
        self.encoder = timm.create_model(self.enc_name, features_only=True, pretrained=pretrained)
        self.num_ch_enc = self.encoder.feature_info.channels()
        self.enc_sc = self.encoder.feature_info.reduction()
        # Autoencoder decoder: no skips, 3 sigmoid (RGB) output channels.
        self.decoder = DECODERS[self.dec_name](num_ch_enc=self.num_ch_enc, enc_sc=self.enc_sc, upsample_mode='nearest', use_skip=False, out_sc=self.out_scales, out_ch=3, out_act='sigmoid')

    def forward(self, x: Tensor) -> TensorDict:
        """Image autoencoder forward pass.

        :param x: (Tensor) (b, 3, h, w) Input image.
        :return: {
            autoenc_feats: (list(Tensor)) Autoencoder encoder multi-scale features.
            autoenc_imgs: (TensorDict) (b, 3, h/2**s, w/2**s) Dict mapping from scales to image reconstructions.
        }
        """
        feat = self.encoder(x)
        out = {'autoenc_feats': feat}
        k = 'autoenc_imgs'
        out[k] = self.decoder(feat)
        # Return reconstructions in deterministic (sorted-scale) order.
        out[k] = {k2: out[k][k2] for k2 in sorted(out[k])}
        return out
class StructurePerception(nn.Module):
    """Self-attention Structure Perception Module (channel-wise attention)."""

    def forward(self, x):
        """Add channel self-attention context to the input feature map.

        :param x: (Tensor) (b, c, h, w) Input features.
        :return: (Tensor) (b, c, h, w) Features plus attention context (residual).
        """
        b, c, h, w = x.shape
        flat = x.view(b, c, -1)                 # (b, c, h*w)
        affinity = flat @ flat.permute(0, 2, 1)  # (b, c, c) channel affinity
        # Subtract from the row-wise max so the softmax emphasises dissimilar channels.
        affinity = affinity.max(dim=-1, keepdim=True)[0] - affinity
        context = affinity.softmax(dim=-1) @ flat
        return x + context.view(b, c, h, w)
class DetailEmphasis(nn.Module):
    """Detail Emphasis Module: conv-BN-ReLU followed by SE-style channel attention.

    :param ch: (int) Number of input/output channels.
    """

    def __init__(self, ch: int):
        super().__init__()
        self.conv = nn.Sequential(conv3x3(ch, ch), nn.BatchNorm2d(ch), nn.ReLU(inplace=True))
        # Squeeze-and-excitation style gate: global pool -> 1x1 convs -> sigmoid.
        self.att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(ch, ch, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch, ch, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Refine features and re-add them weighted by per-channel attention."""
        feat = self.conv(x)
        return feat + (feat * self.att(feat))
class CADepthDecoder(nn.Module):
    """From CADepth (https://arxiv.org/abs/2112.13047)

    Monodepth-style decoder with a StructurePerception block on the deepest
    encoder features and a DetailEmphasis block at each decoder stage.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: Sequence[int], enc_sc: Sequence[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: Sequence[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        # Build decoder stages from coarsest (i=4) to finest (i=0).
        for i in range(4, (- 1), (- 1)):
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)
            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                # Concatenate encoder features at the matching resolution.
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)
            # DetailEmphasis operates on the concatenated (upsampled + skip) features.
            self.convs[f'detail_emphasis_{i}'] = DetailEmphasis(num_ch_in)
        for i in self.out_sc:
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)
        self.structure_perception = StructurePerception()
        # `convs` is a plain dict; parameters are registered through this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]

    def forward(self, enc_features):
        """Decode multi-scale predictions from the encoder feature pyramid.

        :param enc_features: (Sequence[Tensor]) Encoder features, finest to coarsest.
        :return: (dict[int, Tensor]) Mapping from scale to activated prediction.
        """
        out = {}
        # Channel self-attention on the deepest encoder features.
        x = self.structure_perception(enc_features[(- 1)])
        for i in range(4, (- 1), (- 1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x += [enc_features[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'detail_emphasis_{i}'](x)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.activation(self.convs[f'outconv_{i}'](x))
        return out
def get_discrete_bins(n: int, mode: str='linear') -> Tensor:
    """Get the discretized disparity value depending on number of bins and quantization mode.

    All modes assume that we are quantizing sigmoid disparity, and therefore are in range [0, 1].
    Quantization modes:
        - linear: Evenly spaces out all bins.
        - exp: Spaces bins out exponentially, providing finer detail at low disparity values, ie higher depth values.

    :param n: (int) Number of bins to use.
    :param mode: (str) Quantization mode. {linear, exp}
    :return: (Tensor) (1, n, 1, 1) Computed discrete disparity bins.
    """
    bins = torch.arange(n) / n
    if mode == 'linear':
        pass
    elif mode == 'exp':
        # FIX: `torch.Tensor(200)` allocates an UNINITIALIZED tensor with 200
        # elements; we want the scalar 200 (max depth) instead.
        max_depth = torch.tensor(200.0)
        # (bins - 1) in [-1, 0) -> bins in [1/max_depth, 1).
        bins = torch.exp(torch.log(max_depth) * (bins - 1))
    else:
        raise ValueError(f'Invalid discretization mode. "{mode}"')
    return bins.view(1, n, 1, 1)
class SelfAttentionBlock(nn.Module):
    """Self-Attention Block over channels.

    Query/key/value projections are 1x1 convs + ReLU; attention weights
    are computed between channels and applied to the value features.

    :param ch: (int) Number of input/output channels.
    """

    def __init__(self, ch):
        super().__init__()

        def proj():
            # 1x1 projection shared structure for q/k/v branches.
            return nn.Sequential(nn.Conv2d(ch, ch, kernel_size=1, padding=0), nn.ReLU(inplace=True))

        self.query_conv = proj()
        self.key_conv = proj()
        self.value_conv = proj()

    def forward(self, x):
        b, c, h, w = x.shape
        q = self.query_conv(x).view(b, c, h * w)
        k = self.key_conv(x).view(b, c, h * w).transpose(1, 2)
        v = self.value_conv(x).view(b, c, h * w)
        # Channel-to-channel attention (b, c, c); NOTE: no residual connection.
        weights = (q @ k).softmax(dim=-1)
        return (weights @ v).view(b, c, h, w)
class DDVNetDecoder(nn.Module):
    """From DDVNet (https://arxiv.org/abs/2003.13951)

    Decoder predicting a Discrete Disparity Volume: each output pixel holds
    logits over `num_bins` disparity bins, collapsed to a continuous value
    via the softmax expectation.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: Sequence[int], enc_sc: Sequence[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: Sequence[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.num_bins = 128  # Number of discrete disparity bins per output channel.
        # Learnable bin centres, initialized to a linear [0, 1) quantization.
        self.bins = nn.Parameter(get_discrete_bins(self.num_bins, mode='linear'))
        self.convs = OrderedDict()
        self.convs['att'] = SelfAttentionBlock(self.num_ch_enc[(- 1)])
        for i in range(4, (- 1), (- 1)):  # Coarsest (4) to finest (0) stage.
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)
            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)
        for i in self.out_sc:
            # Each output channel gets its own stack of `num_bins` logits.
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], (self.num_bins * self.out_ch))
        # `convs` is a plain dict; parameters are registered through this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]
        # NOTE(review): raw logits from the last forward pass are cached here,
        # presumably for an auxiliary loss — this keeps tensors (and graphs) alive.
        self.logits = {}

    def expected_disparity(self, logits: Tensor) -> Tensor:
        """Maps discrete disparity logits into the expected weighted disparity.

        :param logits: (Tensor) (b, n, h, w) Raw unnormalized predicted probabilities.
        :return: (Tensor) (b, 1, h, w) Expected disparity map.
        """
        probs = logits.softmax(dim=1)
        disp = (probs * self.bins).sum(dim=1, keepdim=True)
        return disp

    def argmax_disparity(self, logits: Tensor) -> Tensor:
        """Maps discrete disparity logits to the single most likely bin value.

        :param logits: (Tensor) (b, n, h, w) Raw unnormalized predicted probabilities.
        :return: (Tensor) (b, 1, h, w) Argmax disparity map (non-differentiable).
        """
        idx = logits.argmax(dim=1)
        one_hot = F.one_hot(idx, self.num_bins).permute(0, 3, 1, 2)
        disp = (one_hot * self.bins).sum(dim=1, keepdim=True)
        return disp

    def forward(self, enc_features: Sequence[Tensor]) -> dict[(int, Tensor)]:
        """Decode multi-scale expected disparities from the encoder pyramid.

        :param enc_features: (Sequence[Tensor]) Encoder features, finest to coarsest.
        :return: (dict[int, Tensor]) Mapping from scale to (b, out_ch, h, w) disparity.
        """
        out = {}
        x = self.convs['att'](enc_features[(- 1)])
        for i in range(4, (- 1), (- 1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x += [enc_features[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                logits = self.convs[f'outconv_{i}'](x)
                self.logits[i] = logits
                # Collapse each channel's bin logits into its expectation.
                out[i] = torch.cat([self.expected_disparity(l) for l in logits.chunk(self.out_ch, dim=1)], dim=1)
        return out
def upsample_block(in_ch: int, out_ch: int, upsample_mode: str='nearest') -> nn.Module:
    """Layer to upsample the input by a factor of 2 without skip connections."""
    stages = [
        conv_block(in_ch, out_ch),
        nn.Upsample(scale_factor=2, mode=upsample_mode),
        conv_block(out_ch, out_ch),
    ]
    return nn.Sequential(*stages)
class ChannelAttention(nn.Module):
    """Channel Attention Module incorporating Squeeze & Excitation.

    :param in_ch: (int) Number of input channels.
    :param ratio: (int) Channels reduction ratio in bottleneck.
    """
    def __init__(self, in_ch: int, ratio: int=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_ch, in_ch // ratio, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_ch // ratio, in_ch, bias=False),
        )
        self.init_weights()

    def init_weights(self):
        """Kaiming weight initialization.

        FIX: the original checked only for `nn.Conv2d`, but this module contains
        only `nn.Linear` layers, so initialization was silently a no-op.
        """
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        # (b, c, 1, 1) -> (b, c). `flatten(1)` keeps the batch dim explicit,
        # unlike the previous `squeeze()`, which also dropped it for b == 1.
        att = self.avg_pool(x).flatten(1)
        att = self.fc(att).sigmoid()
        # Broadcast the per-channel gate back over the spatial dims.
        return x * att[..., None, None]
class AttentionBlock(nn.Module):
    """Attention Block incorporating channel attention.

    Upsamples the input by 2, concatenates the skip features and applies
    channel attention followed by a 3x3 conv + ReLU.

    :param in_ch: (int) Number of input channels.
    :param skip_ch: (int) Number of channels in skip connection features.
    :param out_ch: (Optional[int]) Number of output channels (default: `in_ch`).
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    """

    def __init__(self, in_ch: int, skip_ch: int, out_ch: Optional[int]=None, upsample_mode: str='nearest'):
        super().__init__()
        self.in_ch = in_ch + skip_ch
        self.out_ch = out_ch or in_ch
        self.upsample_mode = upsample_mode
        self.layers = nn.Sequential(
            ChannelAttention(self.in_ch),
            conv3x3(self.in_ch, self.out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x, x_skip):
        up = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        return self.layers(torch.cat((up, x_skip), dim=1))
class DiffNetDecoder(nn.Module):
    """From DiffNet (https://arxiv.org/abs/2110.09482)

    Decoder using channel-attention blocks on skip-connected stages and plain
    upsampling blocks elsewhere.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: Sequence[int], enc_sc: Sequence[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: Sequence[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = nn.ModuleDict()
        for i in range(4, (- 1), (- 1)):  # Coarsest (4) to finest (0) stage.
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                # Skip-connected stage: fuse encoder features with channel attention.
                idx = self.enc_sc.index(scale_factor)
                num_ch_skip = self.num_ch_enc[idx]
                self.convs[f'upconv_{i}'] = AttentionBlock(num_ch_in, num_ch_skip, num_ch_out, self.upsample_mode)
            else:
                self.convs[f'upconv_{i}'] = upsample_block(num_ch_in, num_ch_out, self.upsample_mode)
        # NOTE(review): output convs are created for range(4) regardless of
        # `out_sc`, unlike the sibling decoders which iterate `self.out_sc`.
        for i in range(4):
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]

    def forward(self, enc_features):
        """Decode multi-scale predictions from the encoder feature pyramid.

        :param enc_features: (Sequence[Tensor]) Encoder features, finest to coarsest.
        :return: (dict[int, Tensor]) Mapping from scale to activated prediction.
        """
        out = {}
        x = enc_features[(- 1)]
        for i in range(4, (- 1), (- 1)):
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x = self.convs[f'upconv_{i}'](x, enc_features[idx])
            else:
                x = self.convs[f'upconv_{i}'](x)
            if (i in self.out_sc):
                out[i] = self.activation(self.convs[f'outconv_{i}'](x))
        return out
class FSEBlock(nn.Module):
    """Squeeze-and-excitation fusion block used by `HRDepthDecoder`.

    Upsamples `x` by 2, concatenates it with the skip features, re-weights
    channels with an SE-style gate and projects with a 1x1 conv + ReLU.

    :param in_ch: (int) Number of input channels.
    :param skip_ch: (int) Total number of channels in the skip features.
    :param out_ch: (Optional[int]) Number of output channels (default: `in_ch`).
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    """
    def __init__(self, in_ch: int, skip_ch: int, out_ch: Optional[int]=None, upsample_mode: str='nearest'):
        super().__init__()
        self.in_ch = (in_ch + skip_ch)
        self.out_ch = (out_ch or in_ch)
        self.upsample_mode = upsample_mode
        self.reduction = 16  # SE bottleneck reduction ratio.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.se = nn.Sequential(nn.Linear(self.in_ch, (self.in_ch // self.reduction), bias=False), nn.ReLU(inplace=True), nn.Linear((self.in_ch // self.reduction), self.in_ch, bias=False))
        self.conv = nn.Sequential(conv1x1(self.in_ch, self.out_ch, bias=True), nn.ReLU(inplace=True))

    def forward(self, x: Tensor, xs_skip: Sequence[Tensor]) -> Tensor:
        x = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        x = torch.cat([x, *xs_skip], dim=1)
        # NOTE(review): `squeeze()` drops ALL singleton dims, including the batch
        # dim when b == 1; broadcasting via `expand_as` happens to recover the
        # shape here, but `flatten(1)` would be more robust — confirm.
        y = self.avg_pool(x).squeeze()
        y = self.se(y).sigmoid()
        y = y[(..., None, None)].expand_as(x)
        x = self.conv((x * y))
        return x
class HRDepthDecoder(nn.Module):
    """From HRDepth (https://arxiv.org/pdf/2012.07356.pdf)

    Nested (grid-wise) decoder: node `{row}{col}` fuses the upsampled node from
    the row below with all previous nodes in the same row. Nodes in `att_idx`
    are fused with an `FSEBlock`; the remaining nodes use plain conv blocks.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) Must be `True`; HRDepth requires skip connections.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: Sequence[int], enc_sc: Sequence[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: Sequence[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (not self.use_skip):
            raise ValueError('HRDepth decoder must use skip connections.')
        if (len(self.enc_sc) == 4):
            # 4-scale backbones get a synthetic extra finest scale (see `forward`).
            warnings.warn('HRDepth requires 5 scales, but the provided backbone has only 4. The first scale will be duplicated and upsampled!')
            self.enc_sc = ([(self.enc_sc[0] // 2)] + self.enc_sc)
            self.num_ch_enc = ([self.num_ch_enc[0]] + self.num_ch_enc)
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.activation = ACT[self.out_act]
        # Decoder channels: half the encoder channels, plus an extra finest level.
        self.num_ch_dec = [(ch // 2) for ch in self.num_ch_enc[1:]]
        self.num_ch_dec = ([(self.num_ch_dec[0] // 2)] + self.num_ch_dec)
        # Grid node ids as '{row}{col}', ordered so dependencies come first.
        self.all_idx = ['01', '11', '21', '31', '02', '12', '22', '03', '13', '04']
        self.att_idx = ['31', '22', '13', '04']  # Nodes fused via FSEBlock.
        self.non_att_idx = ['01', '11', '21', '02', '12', '03']
        self.convs = nn.ModuleDict()
        for j in range(5):
            for i in range((5 - j)):
                ch_in = self.num_ch_enc[i]
                if ((i == 0) and (j != 0)):
                    ch_in //= 2
                if ((i == 0) and (j == 4)):
                    ch_in = (self.num_ch_enc[(i + 1)] // 2)
                ch_out = (ch_in // 2)
                self.convs[f'{i}{j}_conv_0'] = conv_block(ch_in, ch_out)
                if ((i == 0) and (j == 4)):
                    # Final full-resolution refinement stage (node '04').
                    ch_in = ch_out
                    ch_out = self.num_ch_dec[i]
                    self.convs[f'{i}{j}_conv_1'] = conv_block(ch_in, ch_out)
        for idx in self.att_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            # Skip channels: the encoder node plus all previous decoder nodes in the row.
            self.convs[f'{idx}_att'] = FSEBlock(in_ch=(self.num_ch_enc[(row + 1)] // 2), skip_ch=(self.num_ch_enc[row] + (self.num_ch_dec[(row + 1)] * (col - 1))), upsample_mode=self.upsample_mode)
        for idx in self.non_att_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            if (col == 1):
                self.convs[f'{(row + 1)}{(col - 1)}_conv_1'] = conv_block(in_ch=((self.num_ch_enc[(row + 1)] // 2) + self.num_ch_enc[row]), out_ch=self.num_ch_dec[(row + 1)])
            else:
                # Later columns first reduce the concatenated features with a 1x1 conv.
                self.convs[f'{idx}_down'] = conv1x1(in_ch=(((self.num_ch_enc[(row + 1)] // 2) + self.num_ch_enc[row]) + (self.num_ch_dec[(row + 1)] * (col - 1))), out_ch=(2 * self.num_ch_dec[(row + 1)]), bias=False)
                self.convs[f'{(row + 1)}{(col - 1)}_conv_1'] = conv_block(in_ch=(2 * self.num_ch_dec[(row + 1)]), out_ch=self.num_ch_dec[(row + 1)])
        channels = self.num_ch_dec
        for (i, c) in enumerate(channels):
            if (i in self.out_sc):
                self.convs[f'outconv_{i}'] = nn.Sequential(conv3x3(c, self.out_ch), self.activation)
        self.decoder = nn.ModuleList(list(self.convs.values()))

    def nested_conv(self, convs: Sequence[nn.Module], x: Tensor, xs_skip: Sequence[Tensor]) -> Tensor:
        """Apply a non-attention fusion node: conv_0 + upsample, concat skips,
        optional 1x1 down-projection, then conv_1."""
        x = F.interpolate(convs[0](x), scale_factor=2, mode=self.upsample_mode)
        x = torch.cat([x, *xs_skip], dim=1)
        if (len(convs) == 3):
            x = convs[2](x)  # Optional `_down` 1x1 projection.
        x = convs[1](x)
        return x

    def forward(self, enc_features: Sequence[Tensor]) -> dict[(int, Tensor)]:
        """Decode multi-scale predictions from the (4 or 5 scale) encoder pyramid.

        :param enc_features: (Sequence[Tensor]) Encoder features, finest to coarsest.
        :return: (dict[int, Tensor]) Mapping from scale to activated prediction.
        """
        if (len(enc_features) == 4):
            # Synthesize the missing finest scale by upsampling the first one.
            enc_features = ([F.interpolate(enc_features[0], scale_factor=2, mode=self.upsample_mode)] + enc_features)
        # Column 0 of the grid is the encoder pyramid itself.
        feat = {f'{i}0': f for (i, f) in enumerate(enc_features)}
        for idx in self.all_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            xs_skip = [feat[f'{row}{i}'] for i in range(col)]
            if (idx in self.att_idx):
                feat[f'{idx}'] = self.convs[f'{idx}_att'](self.convs[f'{(row + 1)}{(col - 1)}_conv_0'](feat[f'{(row + 1)}{(col - 1)}']), xs_skip)
            elif (idx in self.non_att_idx):
                conv = [self.convs[f'{(row + 1)}{(col - 1)}_conv_0'], self.convs[f'{(row + 1)}{(col - 1)}_conv_1']]
                if (col != 1):
                    conv.append(self.convs[f'{idx}_down'])
                feat[f'{idx}'] = self.nested_conv(conv, feat[f'{(row + 1)}{(col - 1)}'], xs_skip)
        # Final full-resolution refinement from node '04'.
        x = feat['04']
        x = self.convs['04_conv_0'](x)
        x = self.convs['04_conv_1'](F.interpolate(x, scale_factor=2, mode=self.upsample_mode))
        out_feat = [x, feat['04'], feat['13'], feat['22']]
        out = {i: self.convs[f'outconv_{i}'](f) for (i, f) in enumerate(out_feat) if (i in self.out_sc)}
        return out
def main():
    """Smoke test: run HRDepthDecoder on random encoder features and print output shapes."""
    num_enc_ch = [64, 64, 128, 256, 512]
    enc_sc = [2, 4, 8, 16, 32]
    b, h, w = 4, 256, 512
    enc_features = [torch.rand((b, c, h // s, w // s)) for s, c in zip(enc_sc, num_enc_ch)]
    net = HRDepthDecoder(num_ch_enc=num_enc_ch, enc_sc=enc_sc, out_sc=range(4), out_ch=1)
    out = net(enc_features)
    for key, val in out.items():
        print(key, val.shape)
class MonodepthDecoder(nn.Module):
    """From Monodepth(2) (https://arxiv.org/abs/1806.01260)

    Generic convolutional decoder incorporating multi-scale predictions and skip connections.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """

    def __init__(self, num_ch_enc: Sequence[int], enc_sc: Sequence[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: Sequence[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if self.out_act not in ACT:
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        # Build stages from coarsest (i=4) to finest (i=0).
        for i in range(4, -1, -1):
            ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
            self.convs[f'upconv_{i}_{0}'] = conv_block(ch_in, self.num_ch_dec[i])
            ch_in = self.num_ch_dec[i]
            sf = 2 ** i
            if self.use_skip and sf in self.enc_sc:
                # Account for the concatenated encoder skip features.
                ch_in += self.num_ch_enc[self.enc_sc.index(sf)]
            self.convs[f'upconv_{i}_{1}'] = conv_block(ch_in, self.num_ch_dec[i])
        for i in self.out_sc:
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)
        # `convs` is a plain dict; parameters are registered through this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.act = ACT[self.out_act]

    def forward(self, enc_feat: Sequence[Tensor]) -> TensorDict:
        """Decode multi-scale predictions from the encoder feature pyramid.

        :param enc_feat: (Sequence[Tensor]) Encoder features, finest to coarsest.
        :return: (TensorDict) Mapping from scale to activated prediction.
        """
        out = {}
        x = enc_feat[-1]
        for i in range(4, -1, -1):
            x = self.convs[f'upconv_{i}_{0}'](x)
            feats = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            sf = 2 ** i
            if self.use_skip and sf in self.enc_sc:
                feats.append(enc_feat[self.enc_sc.index(sf)])
            x = self.convs[f'upconv_{i}_{1}'](torch.cat(feats, 1))
            if i in self.out_sc:
                out[i] = self.act(self.convs[f'outconv_{i}'](x))
        return out
class SubPixelConv(nn.Module):
    """Sub-pixel convolution: grouped 3x3 conv expanding channels by `up_factor**2`,
    followed by PixelShuffle upsampling.

    :param ch_in: (int) Number of input (and output) channels.
    :param up_factor: (int) Spatial upsampling factor.
    """
    def __init__(self, ch_in: int, up_factor: int):
        super().__init__()
        ch_out = ch_in * (up_factor ** 2)
        self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=(3, 3), groups=ch_in, padding=1)
        self.shuffle = nn.PixelShuffle(up_factor)
        self.init_weights()

    def init_weights(self):
        """Replicated initialization: all sub-pixel kernels for a given input channel
        start identical, so the shuffled output initially mimics upsampling.

        FIX: the replication factor was hard-coded to 4, which is only correct for
        `up_factor == 2` (and produced a wrong-sized weight otherwise).
        """
        nn.init.zeros_(self.conv.bias)
        n = self.shuffle.upscale_factor ** 2
        self.conv.weight = nn.Parameter(self.conv.weight[::n].repeat_interleave(n, 0))

    def forward(self, x):
        return self.shuffle(self.conv(x))
class SuperdepthDecoder(nn.Module):
    """From SuperDepth (https://arxiv.org/abs/1806.01260)

    Monodepth-style decoder where upsampling (both between stages and for the
    multi-scale outputs) is done with sub-pixel convolutions instead of
    interpolation.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...} (unused here).
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: Sequence[int], enc_sc: Sequence[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: Sequence[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.activation = ACT[self.out_act]
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        for i in range(4, (- 1), (- 1)):  # Coarsest (4) to finest (0) stage.
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            # Sub-pixel (PixelShuffle) upsampling replaces interpolation.
            self.convs[f'upconv_{i}_{0}'] = nn.Sequential(conv_block(num_ch_in, num_ch_out), SubPixelConv(num_ch_out, up_factor=2), nn.ReLU(inplace=True))
            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)
        for i in self.out_sc:
            if (i == 0):
                self.convs[f'outconv_{i}'] = nn.Sequential(conv3x3(self.num_ch_dec[i], self.out_ch), self.activation)
            else:
                # Coarser outputs are brought to full resolution via sub-pixel conv.
                self.convs[f'outconv_{i}'] = nn.Sequential(conv_block(self.num_ch_dec[i], self.out_ch), SubPixelConv(self.out_ch, up_factor=(2 ** i)), self.activation)
        # `convs` is a plain dict; parameters are registered through this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))

    def forward(self, feat: Sequence[Tensor]) -> dict[(int, Tensor)]:
        """Decode multi-scale predictions from the encoder feature pyramid.

        :param feat: (Sequence[Tensor]) Encoder features, finest to coarsest.
        :return: (dict[int, Tensor]) Mapping from scale to activated prediction.
        """
        out = {}
        x = feat[(- 1)]
        for i in range(4, (- 1), (- 1)):
            x = [self.convs[f'upconv_{i}_{0}'](x)]
            sf = (2 ** i)
            if (self.use_skip and (sf in self.enc_sc)):
                idx = self.enc_sc.index(sf)
                x += [feat[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.convs[f'outconv_{i}'](x)
        return out
def conv1x1(in_ch: int, out_ch: int, bias: bool=True) -> nn.Conv2d:
    """Pointwise (1x1) convolution layer."""
    return nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=(1, 1), bias=bias)
def conv3x3(in_ch: int, out_ch: int, bias: bool=True) -> nn.Conv2d:
    """3x3 convolution with reflection padding (preserves spatial size)."""
    return nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=(3, 3), padding=1, padding_mode='reflect', bias=bias)
def conv_block(in_ch: int, out_ch: int) -> nn.Module:
    """3x3 convolution followed by an ELU non-linearity."""
    layers = OrderedDict()
    layers['conv'] = conv3x3(in_ch, out_ch)
    layers['act'] = nn.ELU(inplace=True)
    return nn.Sequential(layers)
def _load_roots():
    """Helper to load the additional model & data roots from the repo config."""
    file = REPO_ROOT / 'PATHS.yaml'
    if not file.is_file():
        warnings.warn(_msg.format(file=file))
        return [], []
    paths = load_yaml(file)
    model_roots = [Path(p) for p in paths['MODEL_ROOTS']]
    data_roots = [Path(p) for p in paths['DATA_ROOTS']]
    return model_roots, data_roots
def _build_paths(names: dict[(str, str)], roots: list[Path]):
    """Helper to build the paths from a list of possible `roots`.
    NOTE: This returns the FIRST found path given by the order of roots. I.e. ordered by priority.
    """
    paths = {}
    for k, v in names.items():
        candidates = (r / v for r in roots)
        found = next((p for p in candidates if p.exists()), None)
        if found is None:
            warnings.warn(f'No valid path found for "{k}"!')
            continue
        paths[k] = found
        print(f'Found path "{k}": {paths[k]}')
    return paths
def find_model_file(name: str) -> Path:
    """Helper to find a model file in the available roots."""
    direct = Path(name)
    if direct.is_file():
        return direct
    # Search roots in priority order; first hit wins.
    for r in MODEL_ROOTS:
        candidate = r / name
        if candidate.is_file():
            return candidate
    raise FileNotFoundError(f'No valid path found for {name} in {MODEL_ROOTS}...')
def find_data_dir(name: str) -> Path:
    """Helper to find a dataset directory in the available roots."""
    if (p := Path(name)).is_dir():
        return p
    try:
        # FIX: search for directories, not files (`is_file` could never match a dataset dir).
        return next(p for r in DATA_ROOTS if (p := (r / name)).is_dir())
    except StopIteration:
        raise FileNotFoundError(f'No valid path found for {name} in {DATA_ROOTS}...')
def register(name: Union[(str, Sequence[str])], type: Optional[str]=None, overwrite: bool=False) -> Callable:
    """Class decorator to build a registry of networks, losses & data available during training.

    :param name: (str|Sequence[str]) Key(s) to access class in the registry.
    :param type: (None|str) Registry to use. If `None`, guess from class name. {None, 'net', 'loss', 'data'}
    :param overwrite: (bool) If `True`, overwrite class `name` in registry `type`.
    :return: (Callable) The decorator that registers and returns the class unchanged.
    """
    def get_type(cls):
        """Helper to identify registry `type` from the class name suffix (via `_NAME2TYPE`)."""
        try:
            return next((v for (k, v) in _NAME2TYPE.items() if cls.__name__.endswith(k)))
        except StopIteration:
            raise ValueError(f'Class matched no valid patterns. ("{cls.__name__}" vs. {set(_NAME2TYPE)})')

    def wrapper(cls):
        """Decorator adding `cls` to the specified registry."""
        if (cls.__module__ == '__main__'):
            # Classes defined in a script's `__main__` can't be re-imported later.
            warnings.warn(f'Ignoring class "{cls.__name__}" created in the "__main__" module.')
            return cls
        ns = ((name,) if isinstance(name, str) else name)
        t = (type or get_type(cls))
        if (t not in _REG):
            raise TypeError(f'Invalid `type`. ("{t}" vs. {set(_REG)})')
        reg = _REG[t]
        for n in ns:
            # Guard against silent double-registration unless explicitly overwriting.
            if ((not overwrite) and (tgt := reg.get(n))):
                raise ValueError(f'"{n}" already in "{t}" registry ({tgt} vs. {cls}). Set `overwrite=True` to overwrite it.')
            reg[n] = cls
        return cls
    return wrapper
@register('disp_mask')
class MaskReg(nn.Module):
    """Photometric-loss masking regularization.
    From SfM-Learner (https://arxiv.org/abs/1704.07813)

    Regularizes the predicted `explainability` mask, which weights each pixel in
    the photometric loss. Pushing every value towards 1 via binary cross-entropy
    prevents the degenerate solution where all pixels are ignored.
    """

    def forward(self, x: Tensor) -> LossData:
        """Mask regularization forward pass.

        :param x: (Tensor) (*) Input sigmoid explainability mask.
        :return: {
            loss: (Tensor) (,) Computed loss.
            loss_dict: (TensorDict) {}.
        }
        """
        target = torch.ones_like(x)
        return F.binary_cross_entropy(x, target), {}
@register('disp_occ')
class OccReg(nn.Module):
    """Disparity occlusion regularization.
    From DVSO (https://arxiv.org/abs/1807.02570)

    Penalizes the mean disparity in the image, encouraging the network to select
    background disparities.

    NOTE: In this case we CANNOT apply mean normalization to the input disparity.
    By definition, this fixes the mean of all elements to 1, making the loss
    impossible to minimize.

    NOTE: The benefits of applying this regularization to purely monocular
    supervision are unclear, since the loss could simply be optimized by making
    all disparities smaller.

    :param invert: (bool) If `True`, encourage foreground disparities instead of background.
    """

    def __init__(self, invert: bool=False):
        super().__init__()
        self.invert = invert
        # Stored as a (frozen) parameter so it travels with the module's device/dtype.
        sign = -1 if self.invert else 1
        self._sign = nn.Parameter(torch.tensor(sign), requires_grad=False)

    def forward(self, x: Tensor) -> LossData:
        """Occlusion regularization forward pass.

        :param x: (Tensor) (*) Input sigmoid disparities.
        :return: {
            loss: (Tensor) (,) Computed loss.
            loss_dict: (TensorDict) {}.
        }
        """
        return self._sign * x.mean(), {}
def get_device(device: Optional[Union[(str, torch.device)]]=None, /) -> torch.device:
    """Create torch device from str or device. Defaults to CUDA if available."""
    if isinstance(device, torch.device):
        return device
    # Falsy input (None, '') falls back to the best available device.
    name = device or ('cuda' if torch.cuda.is_available() else 'cpu')
    return torch.device(name)
def get_latest_ckpt(path: PathLike, ignore: Optional[Sequence[str]]=None, reverse: bool=False, suffix: str='.ckpt') -> Optional[Path]:
    """Return latest or earliest checkpoint in the directory. Assumes files can be sorted in a meaningful way.

    :param path: (PathLike) Directory to search in.
    :param ignore: (Optional[Sequence[str]]) Filenames to ignore, e.g. corrupted?
    :param reverse: (bool) If `True`, return earliest checkpoint.
    :param suffix: (str) Expected checkpoint file extension.
    :return: (Optional[Path]) Latest checkpoint file or `None`.
    """
    path = Path(path)
    ignore = ignore or []
    # A 'last' checkpoint takes priority, unless explicitly ignored.
    if 'last' not in ignore and (last_file := path / ('last' + suffix)).is_file():
        return last_file
    # Sort descending for "latest" (reverse=False), ascending for "earliest".
    files = (f for f in sorted(path.iterdir(), reverse=not reverse)
             if f.suffix == suffix and f.name not in ignore)
    return next(files, None)
def eps(x: Optional[torch.Tensor]=None, /) -> float:
    """Return the `eps` value for the given `input` dtype. (default=float32 ~= 1.19e-7)"""
    if x is None:
        return torch.finfo(torch.float32).eps
    return torch.finfo(x.dtype).eps
def freeze(net: nn.Module, /) -> nn.Module:
    """Fix all model parameters and prevent training."""
    for param in net.parameters():
        param.requires_grad_(False)
    return net
def unfreeze(net: nn.Module, /) -> nn.Module:
    """Make all model parameters trainable."""
    for param in net.parameters():
        param.requires_grad_(True)
    return net
def allclose(net1: nn.Module, net2: nn.Module, /) -> bool:
    """Check if two networks have (approximately) equal parameters."""
    for p1, p2 in zip(net1.parameters(), net2.parameters()):
        try:
            close = p1.allclose(p2)
        except RuntimeError:
            # Mismatched shapes/dtypes cannot be compared -> not equal.
            return False
        if not close:
            return False
    return True
def num_parameters(net: nn.Module, /) -> int:
    """Get number of trainable parameters in a network."""
    total = 0
    for p in net.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
@map_container
def to_torch(x: Any, /, permute: bool=True, device: Optional[torch.device]=None) -> Any:
    """Convert given input to torch.Tensors

    :param x: (Any) Arbitrary structure to convert to tensors (see `map_apply`).
    :param permute: (bool) If `True`, permute to PyTorch convention (b, h, w, c) -> (b, c, h, w).
    :param device: (torch.device) Device to send tensors to.
    :return: (Any) Input structure, converted to tensors.
    """
    # Pass through non-array payloads untouched.
    if isinstance(x, (str, Timer, MultiLevelTimer)):
        return x
    t = torch.as_tensor(x, device=device)
    if permute and t.ndim > 2:
        # Channels-last -> channels-first on the trailing 3 dims, keeping leading batch dims.
        order = list(range(t.ndim - 3)) + [-1, -3, -2]
        t = t.permute(order)
    return t
@map_container
def to_numpy(x: Any, /, permute: bool=True) -> Any:
    """Convert given input to numpy.ndarrays.

    :param x: (Any) Arbitrary structure to convert to ndarrays (see map_apply).
    :param permute: (bool) If `True`, permute from PyTorch convention (b, c, h, w) -> (b, h, w, c).
    :return: (Any) Input structure, converted to ndarrays.
    """
    # Pass through ndarrays and non-array payloads untouched.
    if isinstance(x, (np.ndarray, str, Timer, MultiLevelTimer)):
        return x
    if permute and x.ndim > 2:
        # Channels-first -> channels-last on the trailing 3 dims, keeping leading batch dims.
        order = list(range(x.ndim - 3)) + [-2, -1, -3]
        x = x.permute(order)
    return x.detach().cpu().numpy()
@map_container
def op(_x: Any, /, *args, fn: Union[(str, Callable)], **kwargs) -> Any:
    """Apply a function to an arbitrary input structure. `fn` can be either a function or a method to search on `_x`.

    Example:
        >>> out = fn(input, device, op='to') # Apply x.to(device) to each item in `x`
        >>> out = fn(input, func=torch.softmax, dim=1) # Apply torch.softmax(x, dim=1) to each item in `x`

    :param _x: (Any) Arbitrary structure to convert to tensors (see map_apply).
    :param args: (tuple) `Args` to forward to the given `func`.
    :param fn: (str|callable) Function to apply. If given a string, it will be searched as an attribute of `_x`.
    :param kwargs: (dict) `Kwargs` to forward to the given `op`.
    :return: (Any) Result of applying `fn` to each item.
    """
    # Pass through non-tensor payloads untouched.
    if isinstance(_x, (str, Timer, MultiLevelTimer)):
        return _x
    if isinstance(fn, str):
        # Method lookup on the item itself, e.g. fn='to' -> _x.to(*args, **kwargs).
        return getattr(_x, fn)(*args, **kwargs)
    return fn(_x, *args, **kwargs)
@opt_args_deco
def allow_np(fn: Optional[Callable], permute: bool = False) -> Callable:
    """Decorator to allow for numpy.ndarray inputs in a torch function.

    Main idea is to implement the function using torch ops and apply this decorator to also make it numpy friendly.
    Since numpy.ndarray and torch.Tensor share memory (when on CPU), there shouldn't be any overhead.

    The decorated function can have an arbitrary signature. We enforce that there should only be either np.ndarray
    or torch.Tensor inputs. All other args (int, float, str...) are left unchanged.
    """
    # Widen the annotations so type checkers also accept ndarray arguments.
    anns = fn.__annotations__
    for name, tp in anns.items():
        if tp == torch.Tensor:
            anns[name] = Union[NDArray, tp]

    @wraps(fn)
    def wrapper(*args, **kwargs):
        values = args + tuple(kwargs.values())
        has_np = any(isinstance(v, np.ndarray) for v in values)
        has_torch = any(isinstance(v, torch.Tensor) for v in values)
        if has_np and has_torch:
            raise ValueError('Must pass only np.ndarray or torch.Tensor!')

        if has_np:
            # Round-trip: ndarray -> tensor, run the torch implementation, back to ndarray.
            args, kwargs = to_torch((args, kwargs), permute=permute)

        out = fn(*args, **kwargs)
        return to_numpy(out, permute=permute) if has_np else out
    return wrapper
@allow_np(permute=True)
def standardize(x: Tensor, /, mean: StatsRGB = _mean, std: StatsRGB = _std) -> Tensor:
    """Apply standardization. Default uses ImageNet statistics."""
    # Broadcast the per-channel stats over any leading batch dims.
    view = [1] * (x.ndim - 3) + [3, 1, 1]
    m = x.new_tensor(mean).view(view)
    s = x.new_tensor(std).view(view)
    return (x - m) / s
@allow_np(permute=True)
def unstandardize(x: Tensor, /, mean: StatsRGB = _mean, std: StatsRGB = _std) -> Tensor:
    """Remove standardization. Default uses ImageNet statistics."""
    # Broadcast the per-channel stats over any leading batch dims.
    view = [1] * (x.ndim - 3) + [3, 1, 1]
    m = x.new_tensor(mean).view(view)
    s = x.new_tensor(std).view(view)
    return x * s + m
@allow_np(permute=True)
def to_gray(x: Tensor, /, coeffs: StatsRGB = _coeffs, keepdim: bool = False) -> Tensor:
    """Convert an RGB image to grayscale as a weighted sum of its channels.

    :param x: (Tensor) (*, 3, h, w) Input image(s), channels-first.
    :param coeffs: (StatsRGB) Per-channel weighting coefficients.
    :param keepdim: (bool) If `True`, keep the reduced channel dimension.
    :return: (Tensor) (*, 1, h, w) or (*, h, w) Grayscale image(s).
    """
    # Broadcast the coefficients over any leading batch dims.
    shape = [1] * (x.ndim - 3) + [3, 1, 1]
    coeffs = x.new_tensor(coeffs).view(shape)
    # FIX: reduce over the channel dim. Was `dim=1`, which is only correct for 4D
    # (b, 3, h, w) inputs; `dim=-3` matches the broadcast shape above and also
    # handles unbatched (3, h, w) or extra-batched (*, b, 3, h, w) inputs.
    return (x * coeffs).sum(dim=-3, keepdim=keepdim)
def mean_normalize(x: Tensor, /, dim: Union[int, Sequence[int]] = (2, 3)) -> Tensor:
    """Apply mean normalization across the specified dimensions.

    :param x: (Tensor) (*) Input tensor to normalize of any shape.
    :param dim: (int | Sequence[int]) Dimension(s) to compute the mean across.
    :return: (Tensor) (*) Mean normalized input with the same shape.
    """
    mu = x.mean(dim=dim, keepdim=True)
    # Clamp to avoid division by zero on all-zero inputs.
    return x / mu.clamp(min=eps(x))
def eye_like(x: Tensor, /) -> Tensor:
    """Create an identity matrix with the same dtype, device and size as the input.

    NOTE: The input can be of any shape, except the final two dimensions, which must be square.

    :param x: (Tensor) (*, n, n) Input reference tensor, where `*` can be any size (including zero).
    :return: (Tensor) (*, n, n) Identity matrix with the same dtype and size as the input.
    """
    ndim = x.ndim
    if ndim < 2:
        raise ValueError(f'Input must have at least two dimensions! Got "{ndim}"')

    rows, cols = x.shape[-2:]
    if rows != cols:
        raise ValueError(f'Input last two dimensions must be square (*, n, n)! Got "{x.shape}"')

    # Build a single (n, n) identity, then broadcast it over the leading dims.
    view = [1] * (ndim - 2) + [rows, rows]
    return torch.eye(rows, dtype=x.dtype, device=x.device).view(view).expand_as(x).clone()
def interpolate_like(input: Tensor, /, other: Tensor, mode: str = 'nearest', align_corners: bool = False) -> Tensor:
    """Interpolate `input` to match the spatial size of the `other` tensor."""
    # `align_corners` is not accepted by nearest-neighbour interpolation.
    corners = None if mode == 'nearest' else align_corners
    return F.interpolate(input, size=other.shape[-2:], mode=mode, align_corners=corners)
def expand_dim(x: Tensor, /, num: Union[int, Sequence[int]], dim: Union[int, Sequence[int]] = 0, insert: bool = False) -> Tensor:
    """Expand the specified input tensor dimensions, inserting new ones if required.

    >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=False) # (1, 1, 1) -> (1, 5, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=5, dim=1, insert=True) # (1, 1, 1) -> (1, 5, 1, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=False) # (1, 1, 1) -> (5, 3, 1)
    >>> expand_dim(torch.rand(1, 1, 1), num=(5, 3), dim=(0, 1), insert=True) # (1, 1, 1) -> (5, 3, 1, 1, 1)

    :param x: (Tensor) (*) Input tensor of any shape.
    :param num: (int|Sequence[int]) Expansion amount for the target dimension(s).
    :param dim: (int|Sequence[int]) Dimension(s) to expand.
    :param insert: (bool) If `True`, insert a new dimension at the specified location(s).
    :return: (Tensor) (*, num, *) Expanded tensor at the given location(s).
    """
    # Normalize to parallel sequences of (expansion, dim) pairs.
    if isinstance(num, int):
        num, dim = ([num], [dim]) if isinstance(dim, int) else ([num] * len(dim), dim)
    elif len(num) != len(dim):
        raise ValueError(f'Non-matching expansion and dims. ({len(num)} vs. {len(dim)})')

    if insert:
        for d in dim:
            x = x.unsqueeze(d)

    # -1 keeps existing sizes; only the requested dims are expanded.
    sizes = [-1] * x.ndim
    for n, d in zip(num, dim):
        sizes[d] = n
    return x.expand(sizes)
def get_cls(cls_dict: dict[str, Type[T]], /, *args, type: str, **kwargs) -> T:
    """Instantiate an arbitrary class from a collection.

    Including `type` makes it a keyword-only argument. This has the double benefit of forcing the user to pass it as a
    keyword argument, as well as popping it from the config kwargs.

    :param cls_dict: (Dict[str, cls]) Dict containing the mappings to all possible classes to choose from.
    :param args: (tuple) Args to forward to target class.
    :param type: (str) Key of the target class. Must be present as a keyword-only argument.
    :param kwargs: (dict) Kwargs to forward to target class.
    :return: Instance of the target class with the desired arguments.
    """
    try:
        cls = cls_dict[type]
        return cls(*args, **kwargs)
    except Exception as e:
        # Surface the available keys to make config typos easy to spot.
        raise ValueError(f'Error using "{type}" in {list(cls_dict)}') from e
def get_net(cfg: NetCfg) -> nn.ModuleDict:
    """Instantiate the target networks from a config dict.

    The depth estimation algorithm typically consists of multiple networks, commonly at least `depth` and `pose`.
    We're assuming that, within a given category, we can use different classes interchangeably.
    For instance, all `depth` networks take a single image as input and produce a multi-scale output, while all
    `pose` networks take multiple images and produce relative poses for each pair.

    New types and classes can be added to `NETWORKS` accordingly.

    :param cfg: (Dict[str, Dict[str, Any]]) Target network `types` and kwargs to forward to them.
    :return:
    """
    nets = OrderedDict()
    for name, kw in cfg.items():
        if kw is None:
            continue  # allow disabling a network by setting its config to `null`
        nets[name] = get_cls(NET_REG, type=name, **kw)
    return nn.ModuleDict(nets)
def get_loss(cfg: LossCfg) -> tuple[nn.ModuleDict, nn.ParameterDict]:
    """Instantiate the target losses from a config dict.

    :param cfg: (Dict[str, Dict[str, Any]]) Target loss `types` and kwargs to forward to them.
    :return:
    """
    losses, weights = nn.ModuleDict(), nn.ParameterDict()
    for name, kw in cfg.items():
        if kw is None:
            continue  # allow disabling a loss by setting its config to `null`
        # `weight` is stored separately (non-trainable) and not forwarded to the loss class.
        w = kw.pop('weight', 1)
        weights[name] = nn.Parameter(torch.as_tensor(w), requires_grad=False)
        losses[name] = LOSS_REG[name](**kw)
    return losses, weights
def get_ds(cfg: DataCfg) -> Dataset:
    """Instantiate the target dataset from a config dict.

    :param cfg: (Dict[str, Any]]) Target dataset `type` and kwargs to forward to it.
    :return:
    """
    return get_cls(DATA_REG, **cfg)
def get_dl(mode: str, cfg_ds: DataCfg, cfg_dl: LoaderCfg) -> DataLoader:
    """Instantiate the target dataset loader from a config dict.

    Supports the presence of a common config, which gets overriden by specific cfg within each mode.
    Example:
        ```
        dataset:
            type: kitti
            depth_split: eigen_zhou

            train:
                mode: train
                aug: True

            val:
                mode: val
                aug: False
        ```

    By default we set `pin_memory = True` and `collate_fn = dataset.collate_fn`.
    This assumes we are using a `BaseDataset`.

    :param mode: (str) Dataset split: {'train', 'val', 'test'}.
    :param cfg_ds: (Dict[str, Any]]) Target dataset `type` and kwargs to forward to it (contains all modes).
    :param cfg_dl: (Dict[str, Any]]) Kwargs to forward to each dataloader (contains all modes).
    :return:
    """
    modes = {'train', 'val', 'test'}

    # Dataset: common keys, overridden by the mode-specific sub-config.
    ds_cfg = {k: v for k, v in cfg_ds.items() if k not in modes}
    ds_cfg.update(cfg_ds.get(mode, {}))
    ds = get_ds(ds_cfg)

    # Loader: common keys + defaults, overridden by the mode-specific sub-config.
    dl_cfg = {k: v for k, v in cfg_dl.items() if k not in modes}
    dl_cfg.setdefault('pin_memory', True)
    # FIX: previously `collate_fn` was overwritten unconditionally, silently discarding a
    # common-level config value. Treat the dataset collate as a default (consistent with
    # `pin_memory`) so an explicit config value wins.
    dl_cfg.setdefault('collate_fn', ds.collate_fn)
    dl_cfg.update(cfg_dl.get(mode, {}))
    return DataLoader(ds, **dl_cfg)
def get_opt(parameters: Union[Iterable, nn.Module], cfg: OptCfg) -> optim.Optimizer:
    """Instantiate the target optimizer from a config dict.

    Serves as a wrapper for `timm` `create_optimizer_v2` to maintain consistency in the export interface.

    :param parameters: (Iterable | nn.Module) Parameters to forward to the optimizer (in any `torch` format)
    :param cfg: (Dict[str, Any]) Target optimizer `type` and kwargs to forward to it.
    :return:
    """
    # `timm` expects the key `opt`; accept `type` as an alias for consistency with other configs.
    if 'type' in cfg:
        cfg['opt'] = cfg.pop('type')
    elif 'opt' not in cfg:
        raise KeyError('Must provide a configuration key `type` or `opt` when instantiating an optimizer.')

    # Optionally disable gradients for all batch norm parameters.
    freeze_bn = cfg.pop('frozen_bn', False)
    if freeze_bn:
        if not isinstance(parameters, nn.Module):
            raise ValueError('Cannot freeze batch norm parameters unless given nn.Module')
        for m in parameters.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.requires_grad_(False)

    return create_optimizer_v2(parameters, **cfg)
def get_sched(opt: optim.Optimizer, cfg: SchedCfg) -> optim.lr_scheduler._LRScheduler:
    """Instantiate the target learning rate scheduler from a config dict.

    TODO: Deprecate in favour of `timm` schedulers?

    :param opt: (optim.Optimizer) Optimizer to forward to the LR scheduler.
    :param cfg: (Dict[str, Any]) Target scheduler `type` and kwargs to forward to it.
    :return:
    """
    return get_cls(SCHED_REG, opt, **cfg)
def get_metrics() -> nn.ModuleDict:
    """Instantiate the collection of depth metrics to monitor."""
    m = {
        'MAE': metrics.MAE(),
        'RMSE': metrics.RMSE(),
        'LogSI': metrics.ScaleInvariant(mode='log'),
        'AbsRel': metrics.AbsRel(),
        'Acc': metrics.DeltaAcc(delta=1.25),
    }
    return nn.ModuleDict(m)
class TableFormatter:
    """Format tabular results as a LaTeX `booktabs` table.

    :param header: (Sequence[str]) (m,) Header elements represented as strings.
    :param labels: (Sequence[str]) (n,) Row names represented as strings.
    :param body: (Sequence[Sequence[float]]) (n, m) Table data for each `tag` and each `header`.
    :param metrics: (None|Sequence[int]) Value for each col indicating if a high/low value is better (+1/-1).
    """
    def __init__(self,
                 header: Sequence[str],
                 labels: Sequence[Union[str, Sequence[str]]],
                 body: Sequence[Sequence[float]],
                 metrics: Optional[Union[int, Sequence[int]]] = None):
        self.header = header
        self.labels = labels
        self.body = np.array(body)

        # Normalize `metrics` to shape (1, m): a scalar (or None) is broadcast to all columns.
        self.metrics = np.array(metrics)[None]
        if self.metrics.ndim == 1:
            self.metrics = self.metrics[None].repeat(len(header), axis=1)

        # Multi-part labels are joined into a single string.
        if not isinstance(self.labels[0], str):
            self.labels = [' '.join(l) for l in self.labels]

        shape = (len(self.labels), len(self.header))
        if shape != self.shape:
            raise ValueError(f'Shape mismatch. ({shape} vs. {self.shape})')

        if self.metrics.shape[1] != self.shape[1]:
            raise ValueError(f'Metric type mismatch. ({self.metrics.shape[1]} vs. {self.shape[1]})')

        self.best_mask, self.nbest_mask = self._get_best()

    @classmethod
    def from_files(cls,
                   files: Sequence[Path],
                   key: Optional[Callable[[Path], str]] = None,
                   metrics: Optional[Union[int, Sequence[int]]] = None):
        """Classmethod to create a table from a list of files.

        :param files: (Sequence[Path]) Sequence of YAML files containing results.
        :param key: (Optional[Callable[[Path], str]]) Function to convert a file name into a tag for each row.
        :param metrics: (Optional[Sequence[int]]) Value for each col indicating whether a high/low value is better (+1/-1).
        :return:
        """
        assert len(files), 'Must provide files to create table.'
        if key is None:
            key = lambda x: x.parents[2].name  # default tag: grandparent directory name

        return cls(
            header=list(load_yaml(files[0])),
            labels=list(map(key, files)),
            body=[list(load_yaml(f).values()) for f in files],
            metrics=metrics,
        )

    @classmethod
    def from_df(cls, df: pd.DataFrame, metrics: Optional[Union[int, Sequence[int]]] = None):
        """Classmethod to create a table from a `DataFrame`.

        :param df: (pd.DataFrame) Pandas dataframe to create the table.
        :param metrics: (Optional[Sequence[int]]) Value for each col indicating if a high/low value is better (+1/-1).
        :return:
        """
        return cls(header=df.columns, labels=df.index, body=df.to_numpy(), metrics=metrics)

    @classmethod
    def from_dict(cls, data):
        """Classmethod to create a single-row table from a mapping of {column: value}."""
        return cls(header=np.array(list(data)),
                   labels=['Values'],
                   body=np.array(list(data.values()))[None],
                   metrics=None)

    def __str__(self) -> str:
        """Format as a LaTeX table using default parameters."""
        return self.to_latex()

    @property
    def shape(self) -> tuple[int, int]:
        """Table shape as (rows, cols)."""
        return self.body.shape

    def _to_row(self, label: str, data: Sequence[str]) -> str:
        """Create a single LaTeX table row, terminated by a `\\\\` line break."""
        # FIX: the row terminator previously contained broken escapes; emit a literal
        # LaTeX line break (`\\`) followed by a newline.
        return f"{label} & {' & '.join(data)} \\\\\n"

    def _get_best(self) -> tuple[NDArray, NDArray]:
        """Get masks indicating the `best` and `next best` performing row per column.

        Returns all-False masks when no metric directions were provided.
        """
        if self.metrics[0, 0] is None:
            return np.zeros_like(self.body, dtype=bool), np.zeros_like(self.body, dtype=bool)

        # Flip "lower is better" columns (-1) so the column max is always the winner.
        body = self.body * self.metrics
        best_mask = np.equal(body, body.max(axis=0, keepdims=True))

        if self.shape[0] > 1:
            # Knock out the winners, then take the max again for the runners-up.
            body[best_mask] = -np.inf
            nbest_mask = np.equal(body, body.max(axis=0, keepdims=True))
        else:
            nbest_mask = np.zeros_like(body, dtype=bool)

        return best_mask, nbest_mask

    def _get_col_width(self,
                       width: Optional[Union[int, Sequence[int]]],
                       header: Sequence[str],
                       body: NDArray) -> Sequence[int]:
        """Get the width for each column: dynamic (from content), fixed or user-specified."""
        if width is None:
            # Widest cell (header included) per column.
            width = np.concatenate(([list(map(len, header))], np.vectorize(len)(body)), axis=0).max(0)
        elif isinstance(width, int):
            width = [width] * self.shape[1]
        elif len(width) != self.shape[1]:
            raise ValueError('Non-matching columns.')
        return width

    def to_latex(self, caption: str = 'CAPTION', precision: int = 2, width: Optional[int] = None) -> str:
        """Create a LaTeX booktabs table.

        :param caption: (str) Table caption.
        :param precision: (int) Precision when rounding table `body`.
        :param width: (None|int) Fixed row character width (dynamic if `None`).
        :return: (str) LaTeX table represented as a string.
        """
        header = [h.replace('_', ' ') for h in self.header]
        labels = [l.replace('_', ' ') for l in self.labels]
        body = np.vectorize(lambda i: f'{i:.{precision}f}')(self.body).astype('<U16')

        # FIX: the highlight markers previously contained unescaped '\b'/'\n' sequences,
        # emitting control characters instead of the `\best{...}`/`\nbest{...}` LaTeX macros.
        body[self.best_mask] = [f'\\best{{{i}}}' for i in body[self.best_mask]]
        body[self.nbest_mask] = [f'\\nbest{{{i}}}' for i in body[self.nbest_mask]]

        ws = self._get_col_width(width, header, body)
        header = [f'{h:>{w}}' for h, w in zip(header, ws)]
        body = np.stack([np.vectorize(lambda i: f'{i:>{w}}')(col) for w, col in zip(ws, body.T)]).T

        table = ('\\begin{table}\n'
                 '\\renewcommand{\\arraystretch}{1.2}\n'
                 '\\centering\n'
                 '\\caption{' + caption + '}\n'
                 '\\begin{tabular}{@{}' + 'l' * (len(header) + 1) + '@{}}\n'
                 '\\toprule\n')

        n = max(map(len, self.labels))
        table += self._to_row(label=' ' * n, data=header)
        table += '\\midrule\n'
        for tag, row in zip(labels, body):
            table += self._to_row(label=f'{tag:>{n}}', data=row)
        table += '\\bottomrule\n\\end{tabular}\n\\end{table}\n'
        return table
def _get_percentile(x: NDArray, p: int) -> float: 'Safe percentile to handle NaNs/Inf values.' try: return np.percentile(x, p) except IndexError: return 0.0
@ops.allow_np(permute=True)
def rgb_from_disp(disp: Tensor,
                  invert: bool = False,
                  cmap: str = 'turbo',
                  vmin: float = 0,
                  vmax: Optional[Union[float, Sequence[float]]] = None) -> Tensor:
    """Convert a disparity map into an RGB colormap visualization.

    :param disp: (Tensor) (b, 1, h, w) or (h, w) Input disparity (treated as depth if `invert=True`).
    :param invert: (bool) If `True` invert depth into disparity.
    :param cmap: (str) Matplotlib colormap name.
    :param vmin: (float) Minimum value to use when normalizing.
    :param vmax: (None|float|list) Maximum value to use when normalizing. If `None` use 95th percentile.
    :return: (Tensor) RGB visualization with the same batching as the input.
    """
    # Convert tensor bounds to plain Python values for the per-image handling below.
    if isinstance(vmin, torch.Tensor): vmin = vmin.tolist()
    if isinstance(vmax, torch.Tensor): vmax = vmax.tolist()

    # Remember the input rank so batch dims added here can be stripped at the end.
    n = disp.ndim
    if n == 2: disp = disp[None, None]
    if n == 3: disp = disp[None]

    if invert: disp = geo.to_inv(disp)

    # `to_numpy` permutes channels last: (b, 1, h, w) -> (b, h, w, 1); squeeze -> (b, h, w).
    disp = ops.to_numpy(disp).squeeze(-1)

    if vmax is None:
        # Per-image robust maximum over valid (positive) pixels.
        vmax = [_get_percentile(d[d > 0], 95) for d in disp]
    elif isinstance(vmax, (int, float)) or (isinstance(vmax, torch.Tensor) and vmax.ndim == 0):
        # NOTE(review): the 0-dim tensor check looks unreachable here since tensor `vmax`
        # was already converted via `.tolist()` above — confirm.
        vmax = [vmax] * disp.shape[0]
    elif len(vmax) != disp.shape[0]:
        raise ValueError(f'Non-matching vmax and disp. ({len(vmax)} vs. {disp.shape[0]})')

    # Apply the colormap per-image (each with its own vmax), then restack as a batch.
    rgb = torch.stack(ops.to_torch([apply_cmap(d, cmap=cmap, vmin=vmin, vmax=v) for d, v in zip(disp, vmax)]))
    # Restore the original (unbatched) rank.
    if n == 2 or n == 3: rgb = rgb.squeeze(0)
    return rgb
@ops.allow_np(permute=True)
def rgb_from_feat(feat: Tensor) -> Tensor:
    """Convert dense features into an RGB image via PCA.

    NOTE: PCA is computed using all features in the batch, i.e. the representation is batch dependent.

    :param feat: (Tensor) (*b, c, h, w) Dense feature representation.
    :return: (Tensor) (*b, 3, h, w) The PCAd features.
    """
    squeeze = feat.ndim == 3
    if squeeze:
        feat = feat[None]

    b, _, h, w = feat.shape
    # Flatten to one feature vector per pixel: (b*h*w, c).
    flat = ops.to_numpy(feat.permute(0, 2, 3, 1).flatten(0, 2))

    # Project to 3 components and rescale each channel to [0, 1].
    rgb = PCA(n_components=3).fit_transform(flat)
    rgb -= rgb.min(0)
    rgb /= rgb.max(0)

    # `to_torch` permutes back to channels-first: (b, h, w, 3) -> (b, 3, h, w).
    rgb = ops.to_torch(rgb.reshape(b, h, w, 3))
    return rgb.squeeze(0) if squeeze else rgb
class BaseNetCfg(TypedDict):
    """Config for a base network."""
    # Key of the target network class in the network registry (see `get_net`/`get_cls`).
    type: str
class BaseLossCfg(TypedDict):
    """Config for a loss without parameters. We only require a weighting factor."""
    # Scalar multiplier for this loss term (popped and stored separately in `get_loss`).
    weight: float
class NetCfg(TypedDict):
    """Config dict for a collection of networks."""
    # Depth estimation network (single image in, multi-scale output — see `get_net`).
    depth: BaseNetCfg
    # Pose estimation network (multiple images in, relative poses out — see `get_net`).
    pose: BaseNetCfg
class LossCfg(TypedDict):
    """Config dict for a collection of losses."""
    # Photometric reconstruction loss config.
    recon: BaseLossCfg
    # Optional smoothness regularization (`None` disables it — see `get_loss`).
    # NOTE(review): 'smoooth' looks like a typo for 'smooth' — confirm against the loss registry keys.
    smoooth: Optional[BaseLossCfg]
class DataCfg(TypedDict):
    """Config dict for a collection of BaseDataset. Configs in {train, val, test} override values in main config."""
    # Key of the target dataset class in the dataset registry (see `get_ds`).
    type: str
    # Dataset split to load (e.g. 'train'/'val'/'test').
    mode: str
    # Target image size as (w, h).
    size: Sequence[int]
    # Indexes of support frames relative to the target frame — assumption based on dataset kwargs; confirm.
    supp_idxs: Optional[Sequence[int]]
    # If `True`, load ground-truth depth maps.
    use_depth: Optional[bool]
    # NOTE(review): remaining flags are forwarded verbatim as dataset kwargs — see the
    # matching dataset constructors for their exact semantics.
    use_hints: Optional[bool]
    use_benchmark: Optional[bool]
    use_strong_aug: Optional[bool]
    use_aug: Optional[bool]
    # If `True`, convert loaded items to torch tensors.
    as_torch: Optional[bool]
    # If `True`, log time taken to load/augment each item.
    log_time: Optional[bool]
    # Per-mode overrides, merged on top of the common keys (see `get_dl`).
    train: Optional['DataCfg']
    val: Optional['DataCfg']
    test: Optional['DataCfg']
class LoaderCfg(TypedDict):
    """Config dict for a torch DataLoader. Fields are forwarded directly as `DataLoader` kwargs (see `get_dl`)."""
    # Number of items per batch.
    batch_size: int
    # Number of parallel worker processes.
    num_workers: Optional[int]
    # If `True`, drop the final incomplete batch.
    drop_last: Optional[bool]
    # If `True`, shuffle items each epoch.
    shuffle: Optional[bool]
    # If `True`, copy batches into pinned (page-locked) memory (defaulted to `True` in `get_dl`).
    pin_memory: Optional[bool]
    # Per-mode overrides, merged on top of the common keys (see `get_dl`).
    train: Optional['LoaderCfg']
    val: Optional['LoaderCfg']
    test: Optional['LoaderCfg']
class OptCfg(TypedDict):
    """Config dict for a torch Optimizer."""
    # Optimizer key; `type` is accepted as an alias and renamed to `opt` for `timm` (see `get_opt`).
    type: Optional[str]
    opt: Optional[str]
    # Learning rate.
    lr: float
class SchedCfg(TypedDict):
    """Config dict for a torch LRScheduler."""
    # Key of the target scheduler class in the scheduler registry (see `get_sched`).
    type: str
class TrainCfg(TypedDict):
    """Config dict for training options.

    NOTE(review): most field names mirror PyTorch Lightning `Trainer` arguments — presumably
    forwarded to the trainer; confirm against the training entry point.
    """
    max_epochs: bool
    # If `True`, resume from a previous run — TODO confirm interaction with `load_ckpt`.
    resume_training: Optional[bool]
    # Path to a checkpoint to load weights from.
    load_ckpt: Optional[PathLike]
    log_every_n_steps: Optional[int]
    # Metric name used for checkpointing/early stopping — confirm.
    monitor: Optional[str]
    benchmark: Optional[bool]
    gradient_clip_val: Optional[float]
    precision: Optional[int]
    accumulate_grad_batches: Optional[int]
    # If `True`, enable stochastic weight averaging — confirm.
    swa: Optional[bool]
    early_stopping: Optional[bool]
    # Depth clamping range used during training/evaluation — confirm where consumed.
    min_depth: Optional[float]
    max_depth: Optional[float]