code
stringlengths
17
6.64M
def blue(string):
    """Return `string` wrapped in ANSI escape codes for bright-blue terminal output."""
    prefix, suffix = '\x1b[94m', '\x1b[0m'
    return prefix + string + suffix
def prompt_yes_no(question):
    """Prompt user to type yes or no.

    :param question: (str) Question to display (" [y/n]: " is appended).
    :return: (bool) True iff the answer starts with 'y' or 'Y' (empty input counts as no).
    """
    answer = input(question + ' [y/n]: ')
    # Only the first character matters; slicing avoids an explicit length check.
    return answer[:1] in ('y', 'Y')
class Visualizer:
    """Thin wrapper around a TensorBoard `SummaryWriter` with dict-based logging helpers."""

    def __init__(self, tb_path):
        """:param tb_path: (str) Directory in which to store TensorBoard event files."""
        self.tb_path = tb_path
        if os.path.exists(tb_path):
            if prompt_yes_no('{} already exists. Proceed?'.format(tb_path)):
                # `shutil.rmtree` is portable and avoids shelling out with an unquoted path
                # (the previous `os.system('rm -r ...')` broke on paths with spaces).
                import shutil
                shutil.rmtree(tb_path)
            else:
                exit(0)
        self.writer = SummaryWriter(tb_path)

    def add_scalar(self, scalar_dict, global_step=None):
        """Log a dict of scalars. Nested dicts go to `add_scalars`; lists/arrays are skipped."""
        for tag, scalar in scalar_dict.items():
            if isinstance(scalar, dict):
                self.writer.add_scalars(tag, scalar, global_step)
            elif isinstance(scalar, (list, np.ndarray)):
                continue  # Non-scalar payloads cannot be logged as a single scalar.
            else:
                self.writer.add_scalar(tag, scalar, global_step)

    def add_images(self, image_dict, global_step=None, prefix=None):
        """Log a dict of image batches as single grids, normalized from the [-1, 1] range."""
        for tag, images in image_dict.items():
            if prefix is not None:
                tag = '{}/{}'.format(prefix, tag)
            images = torch.clamp(images, -1, 1)
            images = vutils.make_grid(images, nrow=images.size(0), normalize=True, range=(-1, 1))
            self.writer.add_image(tag, images, global_step)
def main():
    """Copy the Kitti depth benchmark ground truth into the raw-dataset directory layout."""
    TARGET_DIR = 'depth_benchmark'
    K_RAW, K_DEPTH = DATA_PATHS['kitti_raw'], DATA_PATHS['kitti_depth']
    print(f'-> Exporting Kitti Benchmark from "{K_DEPTH}" to "{K_RAW}"...')

    ROOT = K_RAW / TARGET_DIR
    ROOT.mkdir(exist_ok=True)
    for seq in kr.SEQS:
        (ROOT / seq).mkdir(exist_ok=True)

    for mode in ('train', 'val'):
        # `drive` (renamed from `dir`) avoids shadowing the builtin.
        for drive in tqdm(sorted((K_DEPTH / mode).iterdir())):
            # Each drive dir name starts with its parent sequence name, e.g. "2011_09_26_...".
            seq = next(s for s in kr.SEQS if drive.stem.startswith(s))
            shutil.copytree(drive, ROOT / seq / drive.stem, dirs_exist_ok=True)
def process_dataset(src_dir: Path, dst_dir: Path, use_hints: bool = True,
                    use_benchmark: bool = True, overwrite: bool = False) -> None:
    """Process the entire Kitti Raw Sync dataset.

    :param src_dir: (Path) Source dataset root.
    :param dst_dir: (Path) Destination root for the exported databases.
    :param use_hints: (bool) If `True`, also process the `depth_hints` scenes.
    :param use_benchmark: (bool) If `True`, also process the `depth_benchmark` scenes.
    :param overwrite: (bool) If `True`, re-export existing outputs.
    """
    # Copy the split files once; they never need regenerating.
    if not (path := (dst_dir / 'splits')).is_dir():
        shutil.copytree(src_dir / 'splits', path)

    for seq in kr.SEQS:
        src_path, dst_path = src_dir / seq, dst_dir / seq
        export_calibration(src_path, dst_path, overwrite)
        process_sequence(src_path, dst_path, overwrite)

    # Hints and benchmark share an identical per-scene loop, so process them uniformly
    # (previously duplicated verbatim for each directory).
    extras = [name for name, use in (('depth_hints', use_hints), ('depth_benchmark', use_benchmark)) if use]
    for name in extras:
        src_root, dst_root = src_dir / name, dst_dir / name
        for src_scene in sorted(src_root.iterdir()):
            process_sequence(src_scene, dst_root / src_scene.name, overwrite)
def process_sequence(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26."""
    print(f'-> Processing sequence "{src_dir}"')
    # Only sub-directories are drives; loose files at sequence level are ignored.
    drives = (p for p in sorted(src_dir.iterdir()) if not p.is_file())
    for src_path in drives:
        process_drive(src_path, dst_dir / src_path.name, overwrite)
def process_drive(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Process a single Kitti Raw drive: e.g. kitti_raw_sync/2011_09_26/2011_09_26_drive_0005.

    (Docstring previously said "sequence" — copy-paste error from `process_sequence`.)
    """
    print(f' -> Processing drive "{src_dir}"')
    for src_path in sorted(src_dir.iterdir()):
        process_dir(src_path, dst_dir / src_path.name, overwrite)
def process_dir(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Processes a data directory within a given drive.

    Cases:
        - Base dataset: images_00, images_01, velodyne_points, oxts (/data & /timestamps for each)
        - Depth hints: images_02, images_03
        - Depth benchmark: groundtruth/image_02, groundtruth/image_03

    :param src_dir: (Path) Source data directory within a drive.
    :param dst_dir: (Path) Destination directory for the exported database.
    :param overwrite: (bool) If `True`, re-export even if the destination exists.
    """
    print(f' -> Processing dir "{src_dir}"')
    # Dispatch on the directory path: hints and benchmark have bespoke layouts.
    if ('depth_hints' in str(src_dir)):
        if ((not overwrite) and dst_dir.is_dir()):
            print(f' -> Skipping dir "{dst_dir}"')
            return
        export_hints(src_dir, dst_dir)
    elif ('depth_benchmark' in str(src_dir)):
        for src_path in sorted((src_dir / 'groundtruth').iterdir()):
            dst_path = ((dst_dir / 'groundtruth') / src_path.name)
            if ((not overwrite) and dst_path.is_dir()):
                print(f' -> Skipping dir "{dst_path}"')
                continue
            export_images(src_path, dst_path)
    else:
        # Base dataset: loose files (e.g. timestamps) are copied as-is; each 'data'
        # sub-directory is exported based on the extension of its first file.
        for src_path in sorted(src_dir.iterdir()):
            dst_path = (dst_dir / src_path.name)
            if src_path.is_file():
                if (not dst_path.is_file()):
                    shutil.copy(src_path, dst_path)
            else:
                assert (src_path.stem == 'data')
                # Peek at the first file to determine the payload type; empty dirs are kept.
                file = next(src_path.iterdir(), None)
                if (file is None):
                    dst_path.mkdir(exist_ok=True, parents=True)
                    print(f' -> Skipping empty dir "{dst_path}"')
                    continue
                ext = file.suffix
                if ((not overwrite) and dst_path.is_dir()):
                    print(f' -> Skipping dir "{dst_path}"')
                    continue
                if (ext == '.png'):
                    export_images(src_path, dst_path)
                elif (ext == '.bin'):
                    export_velodyne(src_path, dst_path)
                elif (ext == '.txt'):
                    export_oxts(src_path, dst_path)
def export_calibration(src_seq: Path, dst_seq: Path, overwrite: bool = False) -> None:
    """Exports sequence calibration information as a LabelDatabase of arrays."""
    dst_dir = dst_seq / 'calibration'
    if dst_dir.is_dir() and not overwrite:
        print(f' -> Skipping calib "{dst_dir}"')
        return
    print(f' -> Processing calib "{dst_dir}"')
    cam2cam, imu2velo, velo2cam = kr.load_calib(src_seq.stem)
    nested = {'cam2cam': cam2cam, 'imu2velo': imu2velo, 'velo2cam': velo2cam}
    # Flatten the two-level dict into 'outer/inner' keys for the database.
    flat = {f'{outer}/{inner}': value
            for outer, sub in nested.items()
            for inner, value in sub.items()}
    write_label_database(flat, dst_dir)
def export_images(src_dir: Path, dst_dir: Path) -> None:
    """Export images as an ImageDatabase."""
    image_paths = {}
    for file in sorted(src_dir.iterdir()):
        image_paths[file.stem] = file
    write_image_database(image_paths, dst_dir)
def export_oxts(src_dir: Path, dst_dir: Path) -> None:
    """Export OXTS dicts as a LabelDatabase."""
    data = {}
    for file in sorted(src_dir.iterdir()):
        data[file.stem] = kr.load_oxts(file)
    write_label_database(data, dst_dir)
def export_velodyne(src_dir: Path, dst_dir: Path) -> None:
    """Export Velodyne points as a LabelDatabase of arrays."""
    data = {}
    for file in sorted(src_dir.iterdir()):
        data[file.stem] = kr.load_velo(file)
    write_label_database(data, dst_dir)
def export_hints(src_dir: Path, dst_dir: Path) -> None:
    """Export depth hints as a LabelDatabase of arrays."""
    data = {}
    for file in sorted(src_dir.iterdir()):
        data[file.stem] = np.load(file)
    write_array_database(data, dst_dir)
def save(file: Path, **kwargs) -> None:
    """Save a list of arrays as a npz file."""
    message = f' -> Saving to "{file}"...'
    print(message)
    np.savez_compressed(file, **kwargs)
def export_kitti(depth_split: str, mode: str, use_velo_depth: bool=False, save_stem: Optional[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth LiDAR depth images for a given Kitti test split.

    :param depth_split: (str) Kitti depth split to load.
    :param mode: (str) Split mode to use. {'train', 'val', 'test'}
    :param use_velo_depth: (bool) If `True`, load the raw velodyne depth. Only used for legacy reasons!
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f' -> Exporting ground truth depths for KITTI "{depth_split}/{mode}"...')
    split_file = kr.get_split_file(depth_split, mode='test')
    # Each split line is "<seq> <stem> <side>"; side 'l'/'r' maps to camera 2/3.
    lines = [line.split() for line in kr.load_split(split_file)]
    items = [{'seq': l[0], 'cam': (2 if (l[2] == 'l') else 3), 'stem': int(l[1])} for l in lines]

    save_file = (split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite')

    (depths, Ks) = ([], [])
    for d in tqdm(items):
        (cam2cam, _, velo2cam) = kr.load_calib(d['seq'].split('/')[0])
        if use_velo_depth:
            # Legacy path: re-project raw velodyne points into the target camera.
            file = kr.get_velodyne_file(d['seq'], d['stem'])
            depth = kr.load_depth_velodyne(file, velo2cam, cam2cam, cam=d['cam'], use_velo_depth=use_velo_depth)
        else:
            file = kr.get_depth_file(d['seq'], f"image_0{d['cam']}", d['stem'])
            depth = kr.load_depth(file)
        depths.append(depth)
        Ks.append(cam2cam[f"K_0{d['cam']}"])

    # Object dtype since per-item depth maps may have different shapes.
    depths = np.array(depths, dtype=object)
    save(save_file, depth=depths, K=Ks)
def save(file: Path, **kwargs) -> None:
    """Save a list of arrays as a npz file."""
    message = f'-> Saving to "{file}"...'
    print(message)
    np.savez_compressed(file, **kwargs)
def export_syns(mode, save_stem: Optional[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth LiDAR depth images for SYNS.

    :param mode: Dataset split mode to export.
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f'-> Exporting ground truth depths for SYNS "{mode}"...')
    dataset = SYNSPatchesDataset(mode, use_depth=True, use_edges=True, as_torch=False)

    save_file = dataset.split_file.parent / f'{save_stem}.npz'
    if save_file.is_file() and not overwrite:
        raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite')

    # Accumulate per-item ground truth into parallel lists.
    depths, edges, Ks, cats, subcats = [], [], [], [], []
    for _, y, m in tqdm(dataset):
        depths.append(y['depth'].squeeze())
        Ks.append(y['K'])
        edges.append(y['edges'].squeeze())
        cats.append(m['cat'])
        subcats.append(m['subcat'])

    save(save_file, depth=depths, K=Ks, edge=edges, cat=cats, subcat=subcats)
def main():
    """Scan experiment checkpoints and report their epoch and training/finished status."""
    import zipfile  # Local import: only needed to classify corrupted zip-format checkpoints.
    device = ops.get_device('cpu')
    root = MODEL_ROOTS[-1]
    exp, ckpt_name = 'benchmark', 'last'

    files = sorted((root / exp).glob(f'**/{ckpt_name}.ckpt'))
    for f in files:
        n = str(f).replace(str(root), '')
        is_training = (f.parent / 'training').is_file()
        is_finished = (f.parent / 'finished').is_file()
        try:
            ckpt = torch.load(f, map_location=device)
            print(f"{n} - Epoch: {ckpt['epoch']} - Training: {is_training} - Finished: {is_finished}")
        except (EOFError, RuntimeError, zipfile.BadZipFile):
            # `torch.load` raises different exceptions depending on how the file is
            # truncated/corrupted; catching only EOFError missed zip-format corruption.
            print(f'CORRUPTED! {f} - Training: {is_training} - Finished: {is_finished}')
def load_dfs(files: dict[(str, Sequence[Path])]):
    """Load per-model YAML results into a single flat DataFrame indexed by model name."""
    records, index = [], []
    for model, fs in files.items():
        for f in fs:
            records.append(io.load_yaml(f))
            index.append(f'{model}')
    df = pd.json_normalize(records)
    df.index = index
    return df
def load_dfs(files: dict[(str, Sequence[Path])]):
    """Load per-model YAML results into a DataFrame with a (Model, Item) MultiIndex."""
    dfs, models = [], []
    for model, fs in files.items():
        for f in fs:
            dfs.append(pd.json_normalize(io.load_yaml(f)))
            models.append(f'{model}')
    df = pd.concat(dfs)
    # All per-file frames share the same row index, so the product gives one row per (model, item).
    df.index = pd.MultiIndex.from_product([models, dfs[0].index], names=['Model', 'Item'])
    return df
def save_metrics(file: Path, metrics: Sequence[Metrics]):
    """Helper to save metrics. If any strings are present, save metrics separately. Otherwise save means."""
    print(f' -> Saving results to "{file}"...')
    file.parent.mkdir(exist_ok=True, parents=True)
    # Averaging is only meaningful when every metric value is numeric (float).
    all_float = all(isinstance(v, float) for v in metrics[0].values())
    if all_float:
        metrics = {k: float(np.mean([m[k] for m in metrics])) for k in metrics[0]}
    write_yaml(file, metrics, mkdir=True)
def compute_eval_metrics(preds: NDArray, mode: str, cfg_file: Path) -> Sequence[Metrics]:
    """Compute evaluation metrics from network predictions.
    Predictions must be unscaled (see `compute_eval_preds`).

    :param preds: (NDArray) (b, h, w) Precomputed unscaled network predictions.
    :param mode: (str) Evaluation mode, which determines prediction scaling. {stereo, mono}
    :param cfg_file: (Path) Path to YAML config file.
    :return: (list[Metrics]) Metrics computed for each dataset item.
    """
    cfg = load_yaml(cfg_file)
    (cfg_ds, cfg_args) = (cfg['dataset'], cfg['args'])
    # Default target stem follows the config's eval mode, e.g. "targets_test".
    target_stem = cfg_ds.pop('target_stem', f"targets_{cfg.get('mode', 'test')}")
    ds = parsers.get_ds(cfg_ds)
    target_file = (ds.split_file.parent / f'{target_stem}.npz')
    print(f' -> Loading targets from "{target_file}"...')
    # allow_pickle: targets are stored as object arrays (per-item shapes can differ).
    data = np.load(target_file, allow_pickle=True)
    evaluator = MonoDepthEvaluator(mode=mode, **cfg_args)
    metrics = evaluator.run(preds, data)
    return metrics
def save_preds(file: Path, preds: NDArray) -> None:
    """Helper to save network predictions to a NPZ file. Required for submission to the challenge."""
    file.parent.mkdir(exist_ok=True, parents=True)
    message = f'-> Saving network predictions to "{file}"...'
    print(message)
    np.savez_compressed(file, pred=preds)
def compute_eval_preds(ckpt_file: Union[(str, Path)], cfg: dict, overwrite: bool=False) -> NDArray:
    """Compute network predictions required for evaluation.

    The config in `cfg` is equivalent to that used by the `Trainer`.
    Note that in most cases, additional outputs, such as depth or edges can be omitted.
    Furthermore, image `size` is determined by the pretrained checkpoint.

    The config stored in `ckpt_file` is used to automatically determine:
        - Image size for network input.
        - Initial disparity scaling range.

    NOTE: The output disparities are NOT in metric depth. They are just scaled to the range expected by the
    network during training. We still need to apply fixed scaling (stereo) or median scaling (mono). This is
    done in the evaluation script by the `DepthEvaluator`.

    :param ckpt_file: (Path) Path to pretrained model checkpoint. Path can be absolute or relative to `MODEL_ROOTS`.
    :param cfg: (dict) Loaded YAML dataset config.
    :param overwrite: (bool) If `True`, run even if the checkpoint's training has not finished.
    :return: (ndarray) (b, h, w) Array containing unscaled network predictions for each dataset item.
    """
    device = ops.get_device()
    ckpt_file = find_model_file(ckpt_file)
    # Refuse to evaluate unfinished runs unless explicitly overridden.
    if ((not (ckpt_file.parent / 'finished').is_file()) and (not overwrite)):
        print(f'-> Training for "{ckpt_file}" has not finished...')
        print('-> Set `--overwrite 1` to run this evaluation anyway...')
        exit()

    hparams_file = str((ckpt_file.parents[1] / 'hparams.yaml'))
    print(f' -> Loading model weights from "{ckpt_file}"...')
    mod = MonoDepthModule.load_from_checkpoint(ckpt_file, hparams_file=hparams_file, strict=False).eval()
    mod.freeze()

    # Match the training image size and disable augmentation/timing for deterministic eval.
    cfg.update({'size': mod.cfg['dataset']['size'], 'as_torch': True, 'use_aug': False, 'log_time': False})
    ds = parsers.get_ds(cfg)
    dl = DataLoader(ds, batch_size=12, num_workers=4, collate_fn=ds.collate_fn, pin_memory=True)

    print(f' -> Computing predictions...')
    preds = predict_depths(mod.nets['depth'].to(device), dl, device=device, min=mod.min_depth, max=mod.max_depth, use_stereo_blend=False)
    preds = ops.to_numpy(preds).squeeze()
    return preds
def load_dfs(d):
    """Load YAML result files into a flat DataFrame indexed by model name."""
    records, index = [], []
    for model, fs in d.items():
        for f in fs:
            records.append(load_yaml(f))
            index.append(f'{model}')
    df = pd.json_normalize(records)
    df.index = index
    return df
def main():
    """Aggregate per-model evaluation YAMLs and print a LaTeX results table."""
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    root = MODEL_ROOTS[-1]
    exp = 'benchmark'
    split = 'eigen_benchmark'
    mode = '*'
    ckpt_name = 'best'
    res = 'results'
    fname = f'kitti_{split}_{ckpt_name}_{mode}.yaml'

    # Sign per metric column: -1 = lower is better, +1 = higher is better.
    metric_type = ([-1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, +1]
                   if split == 'eigen'
                   else [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1])

    models = []
    if not models:
        # No explicit model list: discover every model that has results on disk.
        fs = sorted(root.glob(f'{exp}/**/{res}/{fname}'))
        models = sorted({file.parents[2].stem for file in fs})
    print('Evaluation Models:', models)  # Fixed user-facing typo "Evalutation".

    eval_files = {m: sorted(root.glob(f'{exp}/{m}/**/{res}/{fname}')) for m in models}
    df = load_dfs(eval_files)

    grouped = df.groupby(level=0)
    df_mean = grouped.agg('mean').reindex(models)
    df_mean.columns.name = 'Mean'
    df_std = grouped.agg('std').reindex(models)
    df_std.columns.name = 'StdDev'
    print(TableFormatter.from_df(df_mean, metrics=metric_type).to_latex(precision=4))
def main():
    """Parse CLI args, build the MonoDepthModule and run PyTorch Lightning training."""
    parser = ArgumentParser(description='Monocular depth trainer.')
    parser.add_argument('--cfg-file', '-c', required=True, type=Path, help='Path to YAML config file to load.')
    parser.add_argument('--cfg-default', '-d', default=None, type=Path, help='Default YAML config file to overwrite.')
    parser.add_argument('--ckpt-dir', '-o', default=MODEL_ROOTS[(- 1)], type=Path, help='Root path to store checkpoint in.')
    parser.add_argument('--name', '-n', required=True, type=str, help='Model name for use during saving.')
    parser.add_argument('--version', '-v', default=0, type=int, help='Model version number for use during saving.')
    parser.add_argument('--seed', '-s', default=42, type=int, help='Random generator seed.')
    args = parser.parse_args()

    # Load the default config first (if given), then override with the main config.
    fs = ([f, args.cfg_file] if (f := args.cfg_default) else [args.cfg_file])
    cfg: MonoDepthCfg = io.load_merge_yaml(*fs)

    logger = pl.loggers.TensorBoardLogger(save_dir=args.ckpt_dir, name=args.name, version=f'{args.version:03}', default_hp_metric=False)

    # Accuracy-style metrics are maximized; error-style metrics are minimized.
    monitor = cfg['trainer'].get('monitor', 'AbsRel')
    mode = ('max' if ('Acc' in monitor) else 'min')
    cb_ckpt = plc.ModelCheckpoint(dirpath=Path(logger.log_dir, 'models'), filename='best', auto_insert_metric_name=False, monitor=f'val_metrics/{monitor}', mode=mode, save_last=True, save_top_k=1, verbose=True)
    cbks = [cb_ckpt, plc.LearningRateMonitor(logging_interval='epoch'), plc.RichModelSummary(max_depth=2), cb.RichProgressBar(), cb.TrainingManager(Path(cb_ckpt.dirpath)), cb.DetectAnomaly(), HeavyLogger()]
    if cfg['trainer'].get('swa'):
        cbks.append(plc.StochasticWeightAveraging(swa_epoch_start=0.5, annealing_epochs=5, swa_lrs=None))
    if cfg['trainer'].get('early_stopping'):
        cbks.append(plc.EarlyStopping(monitor=f'val_metrics/{monitor}', mode=mode, patience=5))

    pl.seed_everything(args.seed)
    # Either warm-start from an explicit checkpoint or build a fresh module.
    if (path := cfg['trainer'].get('load_ckpt')):
        path = find_model_file(path)
        print(f'Loading model from checkpoint: {path}')
        model = MonoDepthModule.load_from_checkpoint(path, cfg=cfg, strict=True)
    else:
        model = MonoDepthModule(cfg)

    # Resume full trainer state from 'last.ckpt' only if it exists.
    resume_path = None
    if cfg['trainer'].get('resume_training'):
        print('Resuming training...')
        if (path := Path(cb_ckpt.dirpath, 'last.ckpt')).is_file():
            resume_path = path
        else:
            print(f'No previous checkpoint found in "{path.parent}". Beginning training from scratch...')

    trainer = pl.Trainer(gpus=1, auto_select_gpus=True, max_epochs=cfg['trainer']['max_epochs'], limit_train_batches=1.0, limit_val_batches=200, accumulate_grad_batches=cfg['trainer'].get('accumulate_grad_batches', None), log_every_n_steps=cfg['trainer'].get('log_every_n_steps', 100), benchmark=cfg['trainer'].get('benchmark', False), precision=cfg['trainer'].get('precision', 32), gradient_clip_val=cfg['trainer'].get('gradient_clip_val', None), logger=logger, callbacks=cbks, enable_model_summary=False)
    trainer.fit(model, ckpt_path=resume_path)
def main():
    """Debug variant of the trainer entry point: few batches/epochs, /tmp checkpoints, TQDM bar."""
    parser = ArgumentParser(description='Monocular depth trainer.')
    parser.add_argument('--cfg-file', '-c', required=True, type=Path, help='Path to YAML config file to load.')
    parser.add_argument('--cfg-default', '-d', default=None, type=Path, help='Default YAML config file to overwrite.')
    parser.add_argument('--ckpt-dir', '-o', default=Path('/tmp'), type=Path, help='Root path to store checkpoint in.')
    parser.add_argument('--name', '-n', required=True, type=str, help='Model name for use during saving.')
    parser.add_argument('--version', '-v', default=0, type=int, help='Model version number for use during saving.')
    parser.add_argument('--seed', '-s', default=42, type=int, help='Random generator seed.')
    args = parser.parse_args()

    # Load the default config first (if given), then override with the main config.
    fs = ([f, args.cfg_file] if (f := args.cfg_default) else [args.cfg_file])
    cfg: MonoDepthCfg = io.load_merge_yaml(*fs)

    logger = pl.loggers.TensorBoardLogger(save_dir=args.ckpt_dir, name=args.name, version=f'{args.version:03}', default_hp_metric=False)

    # Accuracy-style metrics are maximized; error-style metrics are minimized.
    monitor = cfg['trainer'].get('monitor', 'AbsRel')
    mode = ('max' if ('Acc' in monitor) else 'min')
    cb_ckpt = plc.ModelCheckpoint(dirpath=Path(logger.log_dir, 'models'), filename='best', auto_insert_metric_name=False, monitor=f'val_metrics/{monitor}', mode=mode, save_last=True, save_top_k=1, verbose=True)
    cbks = [cb_ckpt, plc.LearningRateMonitor(logging_interval='epoch'), plc.RichModelSummary(max_depth=2), cb.TQDMProgressBar(), cb.TrainingManager(Path(cb_ckpt.dirpath)), cb.DetectAnomaly(), HeavyLogger()]
    if cfg['trainer'].get('swa'):
        cbks.append(plc.StochasticWeightAveraging(swa_epoch_start=0.5, annealing_epochs=5, swa_lrs=None))
    if cfg['trainer'].get('early_stopping'):
        cbks.append(plc.EarlyStopping(monitor=f'val_metrics/{monitor}', mode=mode, patience=5))

    pl.seed_everything(args.seed)
    # Either warm-start from an explicit checkpoint or build a fresh module.
    if (path := cfg['trainer'].get('load_ckpt')):
        path = find_model_file(path)
        print(f'Loading model from checkpoint: {path}')
        model = MonoDepthModule.load_from_checkpoint(path, cfg=cfg, strict=True)
    else:
        model = MonoDepthModule(cfg)

    # Resume full trainer state from 'last.ckpt' only if it exists.
    resume_path = None
    if cfg['trainer'].get('resume_training'):
        print('Resuming training...')
        if (path := Path(cb_ckpt.dirpath, 'last.ckpt')).is_file():
            resume_path = path
        else:
            print(f'No previous checkpoint found in "{path.parent}". Beginning training from scratch...')

    # Tiny limits for a fast smoke-test run.
    num_batches = 10
    max_epochs = 50
    trainer = pl.Trainer(gpus=1, auto_select_gpus=False, max_epochs=max_epochs, limit_train_batches=num_batches, limit_val_batches=num_batches, log_every_n_steps=num_batches, benchmark=cfg['trainer'].get('benchmark', False), precision=cfg['trainer'].get('precision', 32), gradient_clip_val=cfg['trainer'].get('gradient_clip_val', None), logger=logger, callbacks=cbks, enable_model_summary=False)
    trainer.fit(model, ckpt_path=resume_path)
def get_augmentations(strong=True):
    """Return the photometric augmentation: TrivialAugment when strong, else colour jitter."""
    if strong:
        return TrivialAugmentWide()
    return ka.ColorJitter(
        brightness=(0.8, 1.2), contrast=(0.8, 1.2), saturation=(0.8, 1.2),
        hue=(-0.1, 0.1), p=1.0, same_on_batch=True, keepdim=True,
    )
class BaseDataset(ABC, Dataset):
    """Base dataset class that all others should inherit from.

    Each item is returned as three dicts:
        - x: Inputs to the network (typically 'imgs').
        - y: Additional data required for loss computation or logging.
        - m: Metadata for the item (item number, errors, augmentations applied, timings).

    `__getitem__` wraps the abstract `load` with optional augmentation, transforms,
    torch conversion and per-stage timing, so child classes only implement `load`
    (and optionally override `augment`/`transform`/`to_torch`/`collate_fn`/`create_axs`).
    Subclass keyword arguments (`retry_exc`, `silent`, `max_retries`, `use_blacklist`)
    configure automatic retrying of failed items via `retry_new_on_error`.
    `show`/`play` provide tools for visually inspecting the dataset.

    Attributes:
        :param as_torch: (bool) If `True`, convert (x, y, meta) to torch.
        :param use_aug: (bool) If `True`, call `self.augment` during `__getitem__`.
        :param log_time: (bool) If `True`, log time taken to load/augment each item.
    """

    def __init__(self, as_torch: bool=True, use_aug: bool=False, log_time: bool=True):
        self.logger.debug('Initializing BaseDataset')
        self.as_torch = as_torch
        self.use_aug = use_aug
        self.log_time = log_time
        # Timer degrades to a no-op context manager when timing is disabled.
        self.timer = (MultiLevelTimer(name=self.__class__.__qualname__, as_ms=True, precision=4) if self.log_time else nullcontext)
        if self.use_aug:
            self.logger.info(f'Dataset augmentations ENABLED')
        if self.log_time:
            self.logger.info(f'Logging dataset loading times...')

    def __init_subclass__(cls, retry_exc=None, silent=False, max_retries=10, use_blacklist=False, **kwargs):
        """Subclass initializer. We wrap the subclass init to replace kwargs."""
        super().__init_subclass__(**kwargs)
        # Each subclass gets its own child logger.
        cls.logger = get_logger(f'BaseDataset.{cls.__qualname__}')
        cls.__init__ = delegates(cls.__base__.__init__)(cls.__init__)
        # Wrap item access with the retry-on-error behaviour configured by the class kwargs.
        cls.__getitem__ = retry_new_on_error(cls.__getitem__, exc=retry_exc, silent=silent, max=max_retries, use_blacklist=use_blacklist)

    def __repr__(self) -> str:
        # Reconstruct a constructor-style repr from the __init__ signature.
        sig = inspect.signature(self.__init__)
        kw = {k: getattr(self, k) for k in sig.parameters if hasattr(self, k)}
        kw = ', '.join((f'{k}={v}' for (k, v) in kw.items()))
        return f'{self.__class__.__qualname__}({kw})'

    @abstractmethod
    def __len__(self) -> int:
        'Number of items in the dataset.'

    def __getitem__(self, item: int) -> BatchData:
        'Generic dataset __getitem__. Loads, augments, times and converts data to torch (if required).'
        self.logger.debug(f'Loading item {item}...')
        (x, y, m) = ({}, {}, {'items': str(item)})
        with self.timer('Total'):
            with self.timer('Load'):
                (x, y, m) = self.load(item, x, y, m)
            if self.use_aug:
                m['augs'] = ''  # Child classes append the augmentations they apply.
                with self.timer('Augment'):
                    (x, y, m) = self.augment(x, y, m)
            with self.timer('Transform'):
                (x, y, m) = self.transform(x, y, m)
            if self.as_torch:
                with self.timer('ToTorch'):
                    (x, y, m) = self.to_torch(x, y, m)
        if self.log_time:
            m['data_timer'] = self.timer.copy()
            self.logger.debug(str(self.timer))
            self.timer.reset()  # Timer is reused across items.
        return (x, y, m)

    @abstractmethod
    def load(self, item: int, x: dict, y: dict, m: dict) -> BatchData:
        "Load data for a single 'item'. MUST return (x, y, m)."

    def augment(self, x: dict, y: dict, m: dict) -> BatchData:
        'Augment a loaded item. Default is a no-op.'
        return (x, y, m)

    def transform(self, x: dict, y: dict, m: dict) -> BatchData:
        'Transform a loaded item. Default is a no-op.'
        return (x, y, m)

    def to_torch(self, x: dict, y: dict, m: dict) -> BatchData:
        'Convert (x, y, m) to torch Tensors. Default converts to torch and permutes >=3D tensors.'
        return ops.to_torch((x, y, m))

    @classmethod
    def collate_fn(cls, batch: Sequence[BatchData]):
        'Function to collate multiple dataset items. By default uses the PyTorch collator.'
        return default_collate(batch)

    def create_axs(self) -> Axes:
        'Create the axis structure required for plotting. Assumes data will be in numpy format.'
        (_, ax) = plt.subplots()
        return ax

    @abstractmethod
    def show(self, x: dict, y: dict, m: dict, axs: Optional[Axes]=None) -> None:
        "Show a single dataset item. Should call 'create_axs' if 'axs' is None."

    def play(self, fps: float=30, skip: int=1, reverse: bool=False, fullscreen: bool=False, axs: Optional[Axes]=None) -> None:
        'Iterate through dataset at the required fps and show each item.'
        if self.as_torch:
            raise ValueError('Dataset must not be in torch format when playing.')
        axs = (self.create_axs() if (axs is None) else axs)
        fig = plt.gcf()
        if fullscreen:
            fig.canvas.manager.full_screen_toggle()
        items = (range((len(self) - 1), 0, (- skip)) if reverse else range(0, len(self), skip))
        for i in items:
            (x, y, m) = self[i]
            # Clear either the single axis or every axis in the grid before redrawing.
            (axs.cla() if isinstance(axs, plt.Axes) else [ax.cla() for ax in axs.flatten()])
            self.show(x, y, m, axs)
            fig.suptitle(str(i))
            plt.pause((1 / fps))
            plt.show(block=False)
@register('kitti_lmdb')
class KittiRawLMDBDataset(KittiRawDataset):
    """Kitti Depth based on the kitti_raw_sync dataset.

    LMDB variant of KittiRawDataset, designed as a drop-in replacement that can help with IO load.
    The loading functions mirror the original dataset's format but read from per-drive LMDB
    databases, which are preloaded into dict caches during `__init__`.

    Attributes:
        :param split: (str) Kitti depth split to use (eigen, eigen_zhou, eigen_full, benchmark, odom).
        :param mode: (str) Dataset mode (core, val, test).
        :param size: (Sequence[int]) Target image training size as (w, h).
        :param supp_idxs: (int | Sequence[int]) Indexes of the support images to load.
        :param use_depth: (bool) If `True`, load ground truth LiDAR depth maps.
        :param use_hints: (bool) If `True`, load precomputed fused SGBM depth maps.
        :param use_benchmark: (bool) If `True`, load corrected ground truth depth maps.
        :param use_strong_aug: (bool) If `False`, use only colour jittering augmentations.
        :param as_torch: (bool) If `True`, convert (x, y, meta) to torch.
        :param use_aug: (bool) If `True`, call 'self.augment' during __getitem__.
        :param log_time: (bool) If `True`, log time taken to load/augment each item.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Caches mapping database keys (e.g. '<seq>/<drive>/<cam>') to loaded LMDBs.
        self.image_dbs = {}
        self.depth_dbs = {}
        self.poses_dbs = {}
        self.hints_dbs = {}
        self.calib_dbs = {}
        self.preload()

    def preload(self) -> None:
        'Create all LMDBs required by the dataset split.'
        drives = set((item['seq'] for item in self.items))
        for d in drives:
            self.image_dbs[f'{d}/image_02'] = kr.load_images(*d.split('/'), 'image_02')
            self.image_dbs[f'{d}/image_03'] = kr.load_images(*d.split('/'), 'image_03')
        if self.use_hints:
            for d in drives:
                self.hints_dbs[f'{d}/image_02'] = kr.load_hints(*d.split('/'), 'image_02')
                self.hints_dbs[f'{d}/image_03'] = kr.load_hints(*d.split('/'), 'image_03')
        if self.use_depth:
            if self.use_benchmark:
                for d in drives:
                    self.depth_dbs[f'{d}/image_02'] = kr.load_depths(*d.split('/'), 'image_02')
                    self.depth_dbs[f'{d}/image_03'] = kr.load_depths(*d.split('/'), 'image_03')
            else:
                # Raw velodyne depth requires per-sequence calibration to project points.
                seqs = set((seq.split('/')[0] for seq in drives))
                self.calib_dbs = {s: kr.load_calib(s) for s in seqs}
                for d in drives:
                    (s, d2) = d.split('/')
                    self.depth_dbs[d] = kr.load_velo_depths(s, d2, self.calib_dbs[s])

    def parse_items(self) -> tuple[(Path, list[KittiRawItem])]:
        'Helper to parse each dataset item as a sequence, camera and file number.'
        file = kr.get_split_file(self.depth_split, self.mode)
        # Split line format: "<seq> <stem> <side>", where side 'l'/'r' maps to a camera dir.
        lines = [line.split() for line in io.readlines(file)]
        items = [{'seq': line[0], 'cam': self.side2cam[line[2]], 'stem': int(line[1])} for line in lines]
        return (file, items)

    def load_image(self, data: KittiRawItem, offset: int=0) -> Image:
        """Load and resize a single image.

        :param data: (KittiRawItem) Data representing the item's sequence, camera and number.
        :param offset: (int) Additional offset to apply to the item number.
        :return: (Image) (self.w, self.h) Loaded PIL image.
        """
        k = f"{(data['stem'] + offset):010}"
        kdb = f"{data['seq']}/{data['cam']}"
        db = self.image_dbs[kdb]
        if (k not in db):
            raise FileNotFoundError(f'Could not find specified file "{kdb}/{k}" with "offset={offset!r}"')
        image = db[k].resize(self.size, resample=Image.BILINEAR)
        return image

    def load_depth(self, data: KittiRawItem) -> np.ndarray:
        """Load ground truth LiDAR depth.

        :param data: (KittiRawItem) Data representing the item's sequence, camera and number.
        :return: (ndarray) (h, w, 1) Loaded depth map, nearest-resized to the full image size.
        """
        if self.use_benchmark:
            k = f"{data['stem']:010}"
            kdb = f"{data['seq']}/{data['cam']}"
            depth = self.depth_dbs[kdb][k]
        else:
            # Velodyne DBs are keyed by (stem, camera index) rather than a flat string.
            k = (f"{data['stem']:010}", int(data['cam'][(- 2):]))
            kdb = data['seq']
            depth = self.depth_dbs[kdb][k]
        # order=0 (nearest) to avoid interpolating between sparse LiDAR returns.
        depth = skit.resize(depth, (self.h_full, self.w_full), order=0, preserve_range=True, mode='constant')
        return depth[(..., None)]

    def load_hint(self, data: KittiRawItem) -> np.ndarray:
        """Load a precomputed fusion of SGBM predictions.

        :param data: (KittiRawItem) Data representing the item's sequence, camera and number.
        :return: (ndarray) (h, w, 1) Loaded fused SGBM depth map, nearest-resized to `self.size`.
        """
        k = f"{data['stem']:010}"
        kdb = f"{data['seq']}/{data['cam']}"
        depth = cv2.resize(self.hints_dbs[kdb][k], dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth[(..., None)]
def main():
    """Smoke-test the LMDB Kitti dataset by instantiating it and playing through its items."""
    ds = KittiRawLMDBDataset(
        split='benchmark2', mode='train', size=(640, 192), supp_idxs=None,
        use_depth=True, interpolate_depth=False, use_depth_hints=False,
        use_poses=False, use_strong_aug=False, as_torch=False,
        use_aug=False, log_timings=False,
    )
    print(ds)
    ds.play(fps=100)
@register('syns_patches')
class SYNSPatchesDataset(BaseDataset):
    """SYNS Patches dataset based on SYNS panorama images/LiDAR.

    See each function for details.

    Attributes:
    :param mode: (str) Split mode to load. {val, test, all}
    :param size: (Sequence[int]) Target image training size as (w, h).
    :param use_depth: (bool) If `True`, load ground truth LiDAR depth maps.
    :param use_edges: (bool) If `True`, load ground truth depth edges.
    :param as_torch: (bool) If `True`, convert (x, y, meta) to torch.
    :param use_aug: (bool) If `True`, call 'self.augment' during __getitem__.
    :param log_time: (bool) If `True`, log time taken to load/augment each item.
    """

    def __init__(self, mode: str, size: tuple[(int, int)]=(640, 192), use_depth: bool=True, use_edges: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.mode = mode
        (self.w, self.h) = self.size = size
        self.use_depth = use_depth
        self.use_edges = use_edges
        # Which edges subdirectory to read ground-truth edges from.
        self.edges_dir = 'edges'
        # SYNS is an evaluation-only dataset; augmenting it makes no sense.
        if self.use_aug:
            raise ValueError('SYNS Patches is a testing dataset, no augmentations should be applied.')
        # Ground truth cannot be loaded for the val/test splits.
        if ((self.mode in {'val', 'test'}) and (self.use_depth or self.use_edges)):
            raise ValueError('Cannot use ground truth depth when loading the testing or validation split!')
        (self.w_full, self.h_full) = self.size_full = (1242, 376)
        (self.split_file, self.items) = self.parse_items()
        if (self.h > self.w):
            raise ValueError(f'Target image height={self.h} is greater than image width={self.w}. Did you pass these in the correct order? Expected (width, height).')

    def __len__(self) -> int:
        'Number of items in dataset.'
        return len(self.items)

    def parse_items(self) -> tuple[(Path, list[tuple[(str, str)]])]:
        'Return the list of items in the dataset.'
        return syp.load_split(self.mode)

    def load(self, item: int, x: dict, y: dict, m: dict) -> BatchData:
        """Load single item in dataset.

        NOTE: Items in each dict will be converted into `torch.Tensors` if `self.as_torch=True`.

        :param item: (int) Dataset item to load.
        :param x: {
            imgs: (ndarray) (h, w, 3) Target image for depth estimation.
        }
        :param y: {
            imgs: (ndarray) (h, w, 3) x['imgs'] (NO AUGMENTATIONS).
            K: (ndarray) Camera intrinsics parameters.
            depth: (Optional[ndarray]) (h, w, 1) Ground truth LiDAR depth map.
            edges: (Optional[ndarray]) (h, w, 1) Ground truth depth edges.
        }
        :param m: {
            cat: (str) Image category label.
            subcat: (str) Image subcategory label.
        }
        """
        d = self.items[item]
        (m['cat'], m['subcat']) = syp.load_category(d[0])
        with self.timer('Image'):
            (img, img_res) = self.load_image(d)
        # Network input is the resized image; `y` keeps the full-size original.
        x['imgs'] = io.pil2np(img_res)
        y['imgs'] = io.pil2np(img)
        if self.use_depth:
            with self.timer('Depth'):
                y['depth'] = self.load_depth(d)
        # NOTE(review): gated on `edges_dir` (always truthy) rather than on
        # `use_edges` -- confirm whether edges should respect the flag.
        if self.edges_dir:
            with self.timer('Edges'):
                edges = self.load_edges(d)
                y['edges'] = io.pil2np(edges)[(..., None)].astype(bool)
        y['K'] = syp.load_intrinsics()
        return (x, y, m)

    def load_image(self, data: tuple[(str, str)]) -> tuple[(Image, Image)]:
        """Load and resize a single image.

        :param data: (str, str) Data representing the item's scene and file number.
        :return: (Image, Image) Full-size and (self.w, self.h) resized PIL images.
        """
        file = syp.get_image_file(*data)
        img = Image.open(file)
        img_res = img.resize(self.size, resample=Image.BILINEAR)
        return (img, img_res)

    def load_depth(self, data: tuple[(str, str)]) -> np.ndarray:
        """Load a single depth map.

        :param data: (str, str) Data representing the item's scene and file number.
        :return: (ndarray) Loaded numpy depth map.
        """
        file = syp.get_depth_file(*data)
        depth = np.load(file)
        return depth

    def load_edges(self, data: tuple[(str, str)]) -> Image:
        """Load a single depth edge map.

        :param data: (str, str) Data representing the item's scene and file number.
        :return: (Image) Loaded PIL edge map.
        """
        file = syp.get_edges_file(data[0], self.edges_dir, data[1])
        edges = Image.open(file)
        return edges

    def transform(self, x: dict, y: dict, m: dict) -> BatchData:
        'Apply ImageNet standarization to the images processed by the network `x`.'
        x['imgs'] = ops.standardize(x['imgs'])
        return (x, y, m)

    def create_axs(self) -> Axes:
        'Create the axis structure required for plotting.'
        # One row per displayed modality: image [+ depth] [+ edges].
        (_, axs) = plt.subplots(((1 + self.use_depth) + (self.edges_dir is not None)))
        if isinstance(axs, plt.Axes):
            # A single subplot is returned bare; wrap for uniform indexing.
            axs = np.array([axs])
        plt.tight_layout()
        return axs

    def show(self, x: dict, y: dict, m: dict, axs: Optional[Axes]=None) -> None:
        'Show a single dataset item.'
        axs = (self.create_axs() if (axs is None) else axs)
        axs[0].imshow(y['imgs'])
        if self.use_depth:
            axs[1].imshow(viz.rgb_from_disp(y['depth'], invert=True))
        if self.edges_dir:
            axs[(- 1)].imshow(y['edges'])
def get_split_file(mode: str) -> Path: 'Get scene information file based on the scene number.' file = ((PATHS['syns_patches'] / 'splits') / f'{mode}_files.txt') return file
def get_scenes() -> list[Path]: 'Get paths to each of the scenes.' return sorted((path for path in PATHS['syns_patches'].iterdir() if (path.is_dir() and (path.stem != 'splits'))))
def get_scene_files(scene_dir: Path) -> dict[(str, Sequence[Path])]: 'Get paths to all subdir files for a given scene.' files = {key: sorted((scene_dir / key).iterdir()) for key in SUBDIRS if (scene_dir / key).is_dir()} return files
def get_info_file(scene: str) -> Path: 'Get scene information file based on the scene number.' paths = (PATHS['syns_patches'] / scene).iterdir() return next((f for f in paths if (f.suffix == '.txt')))
def get_image_file(scene: str, file: str) -> Path: 'Get image filename based on scene and item number.' return (((PATHS['syns_patches'] / scene) / 'images') / file)
def get_depth_file(scene: str, file: str) -> Path: 'Get image filename based on scene and item number.' return (((PATHS['syns_patches'] / scene) / 'depths') / file).with_suffix('.npy')
def get_edges_file(scene: str, subdir: str, file: str) -> Path: 'Get image filename based on scene and item number.' assert ('edges' in subdir), f'Must provide an "edges" directory. ({subdir})' assert (subdir in SUBDIRS), f"Non-existent edges directory. ({subdir} vs. {[s for s in SUBDIRS if ('edges' in s)]})" return (((PATHS['syns_patches'] / scene) / subdir) / file)
def load_info(scene: str) -> Sequence[str]: 'Load the scene information.' file = get_info_file(scene) info = readlines(file, encoding='latin-1') return info
def load_category(scene: str) -> tuple[(str, str)]: 'Load the scene category and subcategory.' info = load_info(scene) category = info[1].replace('Scene Category: ', '') try: (cat, subcat) = category.split(': ') except ValueError: (cat, subcat) = category.split(' - ') return (cat, subcat)
def load_split(mode) -> tuple[(Path, list[list[str]])]: 'Load the list of scenes and filenames that are part of the test split.\n\n Test split file is given as "SEQ ITEM":\n ```\n 01 00.png\n 10 11.png\n ```\n ' file = get_split_file(mode) lines = readlines(file) lines = [l.split(' ') for l in lines] return (file, lines)
def load_intrinsics() -> NDArray: 'Computes the virtual camera intrinsics for the `Kitti` based SYNS Patches.\n\n We compute this based on the desired FOV, using basic trigonometry.\n\n :return: (ndarray) (4, 4) Camera intrinsic parameters.\n ' (Fy, Fx) = KITTI_FOV (h, w) = KITTI_SHAPE (cx, cy) = ((w // 2), (h // 2)) fx = (cx / np.tan((np.deg2rad(Fx) / 2))) fy = (cy / np.tan((np.deg2rad(Fy) / 2))) K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32) return K
def main(): import matplotlib.pyplot as plt for scene in get_scenes(): print(scene.stem) (_, axs11) = plt.subplots(3, 3) plt.tight_layout() (_, axs12) = plt.subplots(3, 3) plt.tight_layout() (_, axs21) = plt.subplots(3, 3) plt.tight_layout() (_, axs22) = plt.subplots(3, 3) plt.tight_layout() axs1 = np.concatenate((axs11.flatten(), axs12.flatten())) axs2 = np.concatenate((axs21.flatten(), axs22.flatten())) images = sorted((scene / 'images').iterdir()) depths = sorted((scene / 'depth_images').iterdir()) [ax.cla() for ax in axs1] [ax.cla() for ax in axs2] [ax.imshow(Image.open(f)) for (ax, f) in zip(axs1, images)] [ax.imshow(Image.open(f)) for (ax, f) in zip(axs2, depths)] [ax.set_title(i) for (i, ax) in enumerate(axs1)] plt.show()
class ChamferDistanceFunction(torch.autograd.Function): @staticmethod def forward(ctx, xyz1, xyz2): (batchsize, n, _) = xyz1.size() (_, m, _) = xyz2.size() xyz1 = xyz1.contiguous() xyz2 = xyz2.contiguous() dist1 = torch.zeros(batchsize, n) dist2 = torch.zeros(batchsize, m) idx1 = torch.zeros(batchsize, n, dtype=torch.int) idx2 = torch.zeros(batchsize, m, dtype=torch.int) if (not xyz1.is_cuda): cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) else: dist1 = dist1.cuda() dist2 = dist2.cuda() idx1 = idx1.cuda() idx2 = idx2.cuda() cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2) ctx.save_for_backward(xyz1, xyz2, idx1, idx2) return (dist1, dist2) @staticmethod def backward(ctx, graddist1, graddist2): (xyz1, xyz2, idx1, idx2) = ctx.saved_tensors graddist1 = graddist1.contiguous() graddist2 = graddist2.contiguous() gradxyz1 = torch.zeros(xyz1.size()) gradxyz2 = torch.zeros(xyz2.size()) if (not graddist1.is_cuda): cd.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2) else: gradxyz1 = gradxyz1.cuda() gradxyz2 = gradxyz2.cuda() cd.backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2) return (gradxyz1, gradxyz2)
class ChamferDistance(torch.nn.Module): def __init__(self): super().__init__() if (cd is None): raise RuntimeError(f'Chamfer Distance module unavailable') def forward(self, xyz1, xyz2): return ChamferDistanceFunction.apply(xyz1, xyz2)
class Database():
    """Read-only, pickle-keyed LMDB database.

    The LMDB environment is opened lazily (see `database`) and, unless
    `pre_open` is set, is closed again after metadata reads until the first
    real item fetch (see `_keep_database`) -- presumably so forked DataLoader
    workers each reopen their own handle; confirm against the training setup.
    """
    # Class-level sentinels; instances overwrite them on first access.
    _database = None
    _protocol = None
    _length = None

    def __init__(self, path: PathLike, readahead: bool=True, pre_open: bool=False):
        """Base class for LMDB-backed _databases.

        :param path: (PathLike) Path to the database.
        :param readahead: (bool) If `True`, enables the filesystem readahead mechanism.
        :param pre_open: (bool) If `True`, the first iterations will be faster, but it will raise error when doing multi-gpu training.
            If `False`, the database will open when you will retrieve the first item.
        """
        self.path = str(path)
        self.readahead = readahead
        self.pre_open = pre_open
        # Set once a real item has been fetched; controls `_keep_database`.
        self._has_fetched_an_item = False

    @property
    def database(self):
        # Lazily open the environment in read-only mode, no locking.
        if (self._database is None):
            self._database = lmdb.open(path=self.path, readonly=True, readahead=self.readahead, max_spare_txns=256, lock=False)
        return self._database

    @database.deleter
    def database(self):
        # Close and forget the environment so it can be reopened later.
        if (self._database is not None):
            self._database.close()
            self._database = None

    @property
    def protocol(self):
        """Read the pickle protocol contained in the database.

        :return: The pickle protocol used for keys/values.
        """
        # The 'protocol' entry itself is keyed with a plain ascii key,
        # since the protocol is not known before reading it.
        if (self._protocol is None):
            self._protocol = self._get(item='protocol', convert_key=(lambda key: key.encode('ascii')), convert_value=(lambda value: pickle.loads(value)))
        return self._protocol

    @property
    def keys(self):
        """Read the keys contained in the database.

        NOTE: unlike `protocol`, this is re-read on every access.

        :return: The set of available keys.
        """
        protocol = self.protocol
        keys = self._get(item='keys', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value)))
        return keys

    def __len__(self):
        """Returns the number of keys available in the database.

        :return: The number of keys.
        """
        # Cached, since computing it requires reading the full key list.
        if (self._length is None):
            self._length = len(self.keys)
        return self._length

    def __getitem__(self, item):
        """Retrieves an item or a list of items from the database.

        :param item: A key or a list of keys.
        :return: A value or a list of values.
        """
        self._has_fetched_an_item = True
        if (not isinstance(item, list)):
            item = self._get(item, self._convert_key, self._convert_value)
        else:
            # Batched fetch via a single cursor `getmulti`.
            item = self._gets(item, self._convert_keys, self._convert_values)
        return item

    def __contains__(self, item):
        'Check if a given key is in the database.'
        return (item in self.keys)

    def index(self, index):
        """Retrieves an item or a list of items from the database from an integer index.

        :param index: An index or a list of indexes.
        :return: A value or a list of values.
        """
        key = self.keys[index]
        return (key, self[key])

    def _get(self, item, convert_key, convert_value):
        """Instantiates a transaction and its associated cursor to fetch an item.

        :param item: A key.
        :param convert_key: Callable encoding the key to bytes.
        :param convert_value: Callable decoding the stored bytes.
        :return: The decoded value.
        """
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                item = self._fetch(cursor, item, convert_key, convert_value)
        # Possibly close the environment again (see `_keep_database`).
        self._keep_database()
        return item

    def _gets(self, items, convert_keys, convert_values):
        """Instantiates a transaction and its associated cursor to fetch a list of items.

        :param items: A list of keys.
        :param convert_keys: Callable encoding the keys to bytes.
        :param convert_values: Callable decoding the stored bytes.
        :return: The decoded values.
        """
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                items = self._fetchs(cursor, items, convert_keys, convert_values)
        self._keep_database()
        return items

    def _fetch(self, cursor, key, convert_key, convert_value):
        """Retrieve a value given a key.

        :param cursor: Open LMDB cursor.
        :param key: A key.
        :return: A value.
        """
        key = convert_key(key=key)
        value = cursor.get(key=key)
        value = convert_value(value=value)
        return value

    def _fetchs(self, cursor, keys, convert_keys, convert_values):
        """Retrieve a list of values given a list of keys.

        :param cursor: Open LMDB cursor.
        :param keys: A list of keys.
        :return: A list of values.
        """
        keys = convert_keys(keys=keys)
        (_, values) = list(zip(*cursor.getmulti(keys)))
        values = convert_values(values=values)
        return values

    def _convert_key(self, key):
        """Converts a key into a byte key.

        :param key: A key.
        :return: A byte key.
        """
        return pickle.dumps(key, protocol=self.protocol)

    def _convert_keys(self, keys):
        """Converts keys into byte keys.

        :param keys: A list of keys.
        :return: A list of byte keys.
        """
        return [self._convert_key(key=key) for key in keys]

    def _convert_value(self, value):
        """Converts a byte value back into a value.

        :param value: A byte value.
        :return: A value
        """
        return pickle.loads(value)

    def _convert_values(self, values):
        """Converts bytes values back into values.

        :param values: A list of byte values.
        :return: A list of values.
        """
        return [self._convert_value(value=value) for value in values]

    def _keep_database(self):
        'Checks if the database must be deleted.'
        # Close the environment after metadata-only reads so the handle is
        # not inherited across process forks; kept open once items flow.
        if ((not self.pre_open) and (not self._has_fetched_an_item)):
            del self.database

    def __iter__(self):
        'Provides an iterator over the keys when iterating over the database.'
        return iter(self.keys)

    def __del__(self):
        'Closes the database properly.'
        del self.database
class ImageDatabase(Database): def _convert_value(self, value): 'Converts a byte image back into a PIL Image.\n\n :param value: A byte image.\n :return: A PIL Image image.\n ' return Image.open(io.BytesIO(value))
class MaskDatabase(ImageDatabase): def _convert_value(self, value): 'Converts a byte image back into a PIL Image.\n\n :param value: A byte image.\n :return: A PIL image.\n ' return Image.open(io.BytesIO(value)).convert('1')
class LabelDatabase(Database): pass
class ArrayDatabase(Database): _dtype = None _shape = None @property def dtype(self): if (self._dtype is None): protocol = self.protocol self._dtype = self._get(item='dtype', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value))) return self._dtype @property def shape(self): if (self._shape is None): protocol = self.protocol self._shape = self._get(item='shape', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value))) return self._shape def _convert_value(self, value): return np.frombuffer(value, dtype=self.dtype).reshape(self.shape) def _convert_values(self, values): return np.frombuffer(b''.join(values), dtype=self.dtype).reshape(((len(values),) + self.shape))
class TensorDatabase(ArrayDatabase): def _convert_value(self, value): return torch.from_numpy(super(TensorDatabase, self)._convert_value(value)) def _convert_values(self, values): return torch.from_numpy(super(TensorDatabase, self)._convert_values(values))
def write_image_database(d: dict, database: Path): database.parent.mkdir(parents=True, exist_ok=True) if database.exists(): shutil.rmtree(database) tmp_dir = (Path('/tmp') / f'TEMP_{time()}') tmp_dir.mkdir(parents=True) tmp_database = (tmp_dir / f'{database.name}') with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env: with env.begin(write=True) as txn: key = 'protocol'.encode('ascii') value = pickle.dumps(pickle.DEFAULT_PROTOCOL) txn.put(key=key, value=value, dupdata=False) with env.begin(write=True) as txn: key = pickle.dumps('keys') value = pickle.dumps(sorted(d.keys())) txn.put(key=key, value=value, dupdata=False) for (key, value) in tqdm(sorted(d.items())): with env.begin(write=True) as txn: with value.open('rb') as file: key = pickle.dumps(key) txn.put(key=key, value=file.read(), dupdata=False) shutil.move(f'{tmp_database}', database) shutil.rmtree(tmp_dir)
def write_label_database(d: dict, database: Path): database.parent.mkdir(parents=True, exist_ok=True) if database.exists(): shutil.rmtree(database) tmp_dir = (Path('/tmp') / f'TEMP_{time()}') tmp_dir.mkdir(parents=True) tmp_database = (tmp_dir / f'{database.name}') with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env: with env.begin(write=True) as txn: key = 'protocol'.encode('ascii') value = pickle.dumps(pickle.DEFAULT_PROTOCOL) txn.put(key=key, value=value, dupdata=False) with env.begin(write=True) as txn: key = pickle.dumps('keys') value = pickle.dumps(sorted(d.keys())) txn.put(key=key, value=value, dupdata=False) with env.begin(write=True) as txn: for (key, value) in tqdm(sorted(d.items())): key = pickle.dumps(key) value = pickle.dumps(value) txn.put(key=key, value=value, dupdata=False) shutil.move(f'{tmp_database}', database) shutil.rmtree(tmp_dir)
def write_array_database(d: dict, database: Path): database.parent.mkdir(parents=True, exist_ok=True) if database.exists(): shutil.rmtree(database) tmp_dir = (Path('/tmp') / f'TEMP_{time()}') tmp_dir.mkdir(parents=True) tmp_database = (tmp_dir / f'{database.name}') with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env: with env.begin(write=True) as txn: key = 'protocol'.encode('ascii') value = pickle.dumps(pickle.DEFAULT_PROTOCOL) txn.put(key=key, value=value, dupdata=False) with env.begin(write=True) as txn: key = pickle.dumps('keys') value = pickle.dumps(sorted(d.keys())) txn.put(key=key, value=value, dupdata=False) value = next(iter(d.values())) shape = value.shape dtype = value.dtype with env.begin(write=True) as txn: key = pickle.dumps('shape') value = pickle.dumps(shape) txn.put(key=key, value=value, dupdata=False) with env.begin(write=True) as txn: key = pickle.dumps('dtype') value = pickle.dumps(dtype) txn.put(key=key, value=value, dupdata=False) with env.begin(write=True) as txn: for (key, value) in tqdm(sorted(d.items())): key = pickle.dumps(key) value = pickle.dumps(value) txn.put(key=key, value=value, dupdata=False) shutil.move(f'{tmp_database}', database) shutil.rmtree(tmp_dir)
class DenseL1Error(nn.Module): 'Dense L1 loss averaged over channels.' def forward(self, pred, target): return (pred - target).abs().mean(dim=1, keepdim=True)
class DenseL2Error(nn.Module): 'Dense L2 distance.' def forward(self, pred, target): return (pred - target).pow(2).sum(dim=1, keepdim=True).clamp(min=ops.eps(pred)).sqrt()
class SSIMError(nn.Module): 'Structural similarity error.' def __init__(self): super().__init__() self.pool: nn.Module = nn.AvgPool2d(kernel_size=3, stride=1) self.refl: nn.Module = nn.ReflectionPad2d(padding=1) self.eps1: float = (0.01 ** 2) self.eps2: float = (0.03 ** 2) def forward(self, pred: Tensor, target: Tensor) -> Tensor: 'Compute the structural similarity error between two images.\n\n :param pred: (Tensor) (b, c, h, w) Predicted reconstructed images.\n :param target: (Tensor) (b, c, h, w) Target images to reconstruct.\n :return: (Tensor) (b, c, h, w) Structural similarity error.\n ' (x, y) = (self.refl(pred), self.refl(target)) (mu_x, mu_y) = (self.pool(x), self.pool(y)) sig_x = (self.pool((x ** 2)) - (mu_x ** 2)) sig_y = (self.pool((y ** 2)) - (mu_y ** 2)) sig_xy = (self.pool((x * y)) - (mu_x * mu_y)) num = ((((2 * mu_x) * mu_y) + self.eps1) * ((2 * sig_xy) + self.eps2)) den = ((((mu_x ** 2) + (mu_y ** 2)) + self.eps1) * ((sig_x + sig_y) + self.eps2)) loss = ((1 - (num / den)) / 2).clamp(min=0, max=1) return loss
class PhotoError(nn.Module): 'Class for computing the photometric error.\n From Monodepth (https://arxiv.org/abs/1609.03677)\n\n The SSIMLoss can be deactivated by setting `weight_ssim=0`.\n The L1Loss can be deactivated by setting `weight_ssim=1`.\n Otherwise, the loss is a weighted combination of both.\n\n Attributes:\n :param weight_ssim: (float) Weight controlling the contribution of the SSIMLoss. L1 weight is `1 - ssim_weight`.\n ' def __init__(self, weight_ssim: float=0.85): super().__init__() if ((weight_ssim < 0) or (weight_ssim > 1)): raise ValueError(f'Invalid SSIM weight. ({weight_ssim} vs. [0, 1])') self.weight_ssim: float = weight_ssim self.weight_l1: float = (1 - self.weight_ssim) self.ssim: Optional[nn.Module] = (SSIMError() if (self.weight_ssim > 0) else None) self.l1: Optional[nn.Module] = (DenseL1Error() if (self.weight_l1 > 0) else None) def forward(self, pred: Tensor, target: Tensor) -> Tensor: 'Compute the photometric error between two images.\n\n :param pred: (Tensor) (b, c, h, w) Predicted reconstructed images.\n :param target: (Tensor) (b, c, h, w) Target images to reconstruct.\n :return: (Tensor) (b, 1, h, w) Photometric error.\n ' (b, _, h, w) = pred.shape loss = pred.new_zeros((b, 1, h, w)) if self.ssim: loss += (self.weight_ssim * self.ssim(pred, target).mean(dim=1, keepdim=True)) if self.l1: loss += (self.weight_l1 * self.l1(pred, target)) return loss
@register(('img_recon', 'feat_recon', 'autoenc_recon'))
class ReconstructionLoss(nn.Module):
    """Class to compute the reconstruction loss when synthesising new views.

    Contributions:
    - Min reconstruction error: From Monodepth2 (https://arxiv.org/abs/1806.01260)
    - Static pixel automasking: From Monodepth2 (https://arxiv.org/abs/1806.01260)
    - Explainability mask: From SfM-Learner (https://arxiv.org/abs/1704.07813)
    - Uncertainty mask: From Klodt (https://openaccess.thecvf.com/content_ECCV_2018/papers/Maria_Klodt_Supervising_the_new_ECCV_2018_paper.pdf)

    :param loss_name: (str) Loss type to use.
    :param use_min: (bool) If `True`, take the final loss as the minimum across all available views.
    :param use_automask: (bool) If `True`, mask pixels where the original support image has a lower loss than the warped counterpart.
    :param mask_name: (Optional[str]) Weighting mask used. {'explainability', 'uncertainty', None}
    """

    def __init__(self, loss_name: str='ssim', use_min: bool=False, use_automask: bool=False, mask_name: Optional[str]=None):
        super().__init__()
        self.loss_name = loss_name
        self.use_min = use_min
        self.use_automask = use_automask
        self.mask_name = mask_name
        if (self.mask_name not in {'explainability', 'uncertainty', None}):
            raise ValueError(f'Invalid mask type: {self.mask_name}')
        # Dense photometric criterion selected by name.
        self._photo = {'ssim': PhotoError(weight_ssim=0.85), 'l1': DenseL1Error(), 'l2': DenseL2Error()}[self.loss_name]

    def apply_mask(self, err: Tensor, mask: Optional[Tensor]=None) -> Tensor:
        """Apply a weighting mask to a photometric loss error.

        :param err: (Tensor) (b, n, h, w) Photometric error to mask.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask to apply.
        :return: (Tensor) (b, n, h, w) The weighted photometric error.
        """
        if (self.mask_name and (mask is None)):
            raise ValueError('Must provide a "mask" when masking...')
        if (self.mask_name == 'explainability'):
            # Simple per-pixel weighting by the predicted explainability.
            err *= mask
        elif (self.mask_name == 'uncertainty'):
            # Attenuate the error by exp(-mask) and penalise large mask
            # values, i.e. an uncertainty-weighted likelihood-style term.
            err = ((err * (- mask).exp()) + mask)
        return err

    def apply_automask(self, err: Tensor, source: Tensor, target: Tensor, mask: Optional[Tensor]=None) -> tuple[(Tensor, Tensor)]:
        """Compute and apply an automask based on the identity reconstruction error.

        :param err: (Tensor) (b, 1, h, w) The photometric error for between target and warped support frames.
        :param source: (Optional[Tensor]) (*n, b, 3, h, w) Original support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (
            err: (Tensor) (b, 1, h, w) The automasked photometric error.
            automask: (Tensor) (b, 1, h, w) Boolean mask indicating pixels NOT removed by the automasking procedure.
        )
        """
        # Error of the UNWARPED support frames vs. the target ("static" error).
        err_static = self.compute_photo(source, target, mask=mask)
        # Tiny random noise breaks exact ties between static/warped errors.
        err_static += (ops.eps(err_static) * torch.randn_like(err_static))
        err = torch.cat((err, err_static), dim=1)
        # Keep the per-pixel minimum; pixels where the warped error wins
        # (idxs == 0) are the ones retained by the automask.
        (err, idxs) = torch.min(err, dim=1, keepdim=True)
        automask = (idxs == 0)
        return (err, automask)

    def compute_photo(self, pred: Tensor, target: Tensor, mask: Optional[Tensor]=None) -> Tensor:
        """Compute the dense photometric between multiple predictions and a single target.

        :param pred: (Tensor) (*n, b, 3, h, w) Synthesized warped support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (Tensor) (b, 1, h, w) The reduced photometric error.
        """
        if (pred.ndim == 4):
            # Single support frame: apply the criterion directly.
            err = self._photo(pred, target)
        else:
            # Multiple frames: fold (n, b) into the batch dim, then restore
            # and move `n` next to the batch dim for the reduction below.
            target = target[None].expand_as(pred)
            err = self._photo(pred.flatten(0, 1), target.flatten(0, 1))
            err = err.squeeze(1).unflatten(0, pred.shape[:2]).permute(1, 0, 2, 3)
        err = self.apply_mask(err, mask)
        # Reduce across support frames: per-pixel min (Monodepth2) or mean.
        err = (err.min(dim=1, keepdim=True)[0] if self.use_min else err.mean(dim=1, keepdim=True))
        return err

    def forward(self, pred: Tensor, target: Tensor, source: Optional[Tensor]=None, mask: Optional[Tensor]=None) -> LossData:
        """Compute the reconstruction loss between two images.

        :param pred: (Tensor) (*n, b, 3, h, w) Synthesized warped support images.
        :param target: (Tensor) (b, 3, h, w) Target image to reconstruct.
        :param source: (Optional[Tensor]) (*n, b, 3, h, w) Original support images.
        :param mask: (Optional[Tensor]) (b, n, h, w) Optional weighting mask for the photometric error.
        :return: (
            loss: (Tensor) (,) Scalar loss.
            loss_dict: {
                (Optional) (If using automasking)
                automask: (Tensor) (b, 1, h, w) Boolean mask indicating pixels NOT removed by the automasking procedure.
            }
        )
        """
        ld = {}
        err = self.compute_photo(pred, target, mask)
        if self.use_automask:
            if (source is None):
                raise ValueError('Must provide the original "source" images when automasking...')
            (err, automask) = self.apply_automask(err, source, target, mask)
            ld['automask'] = automask
        loss = err.mean()
        return (loss, ld)
def l1_loss(pred: Tensor, target: Tensor) -> Tensor: 'Dense L1 loss.' loss = (pred - target).abs() return loss
def log_l1_loss(pred: Tensor, target: Tensor) -> Tensor: 'Dense Log L1 loss.' loss = (1 + l1_loss(pred, target)).log() return loss
def berhu_loss(pred: Tensor, target: Tensor, delta: float=0.2, dynamic: bool=True) -> Tensor: 'Dense berHu loss.\n\n :param pred: (Tensor) Network prediction.\n :param target: (Tensor) Ground-truth target.\n :param delta: (float) Threshold above which the loss switches from L1.\n :param dynamic: (bool) If `True`, set threshold dynamically, using `delta` as the max error percentage.\n :return: (Tensor) The computed `berhu` loss.\n ' diff = l1_loss(pred, target) delta = (delta if (not dynamic) else (delta * diff.max())) diff_delta = ((diff.pow(2) + delta.pow(2)) / ((2 * delta) + ops.eps(pred))) loss = torch.where((diff <= delta), diff, diff_delta) return loss
@register(('depth_regr', 'stereo_const')) class RegressionLoss(nn.Module): 'Class implementing a supervised regression loss.\n\n NOTE: The DepthHints automask is not computed here. Instead, we rely on the `MonoDepthModule` to compute it.\n Probably not the best way of doing it, but it keeps this loss clean...\n\n Contributions:\n - Virtual stereo consistency: From Monodepth (https://arxiv.org/abs/1609.03677)\n - Proxy berHu regression: From Kuznietsov (https://arxiv.org/abs/1702.02706)\n - Proxy LogL1 regression: From Depth Hints (https://arxiv.org/abs/1909.09051)\n - Proxy loss automasking: From Depth Hints/Monodepth2 (https://arxiv.org/abs/1909.09051)\n\n :param loss_name: (str) Loss type to use. {l1, log_l1, berhu}\n :param use_automask: (bool) If `True`, use DepthHints automask based on the pred/hints errors.\n ' def __init__(self, loss_name: str='berhu', use_automask: bool=False): super().__init__() self.loss_name = loss_name self.use_automask = use_automask self.criterion = {'l1': l1_loss, 'log_l1': log_l1_loss, 'berhu': berhu_loss}[self.loss_name] def forward(self, pred: Tensor, target: Tensor, mask: Optional[Tensor]=None) -> LossData: if (mask is None): mask = torch.ones_like(target) err = (mask * self.criterion(pred, target)) loss = (err.sum() / mask.sum()) return (loss, {'err_regr': err, 'mask_regr': mask})
@register('autoencoder') class AutoencoderNet(nn.Module): "Image autoencoder network.\n From FeatDepth (https://arxiv.org/abs/2007.10603)\n\n Heavily based on the Depth network with some changes:\n - Single decoder\n - Produces 3 sigmoid channels (RGB)\n - No skip connections, it's an autoencoder!\n\n :param enc_name: (str) `timm` encoder key (check `timm.list_models()`).\n :param pretrained: (bool) If `True`, returns an encoder pretrained on ImageNet.\n :param dec_name: (str) Custom decoder type to use.\n :param out_scales: (Sequence[int]) List of multi-scale output downsampling factor as `2**s.`\n " def __init__(self, enc_name: str='resnet18', pretrained: bool=True, dec_name: str='monodepth', out_scales: Union[(int, Sequence[int])]=(0, 1, 2, 3)): super().__init__() self.enc_name = enc_name self.pretrained = pretrained self.dec_name = dec_name self.out_scales = ([out_scales] if isinstance(out_scales, int) else out_scales) if (self.dec_name not in DECODERS): raise KeyError(f'Invalid decoder key. ({self.dec_name} vs. {DECODERS.keys()}') self.encoder = timm.create_model(self.enc_name, features_only=True, pretrained=pretrained) self.num_ch_enc = self.encoder.feature_info.channels() self.enc_sc = self.encoder.feature_info.reduction() self.decoder = DECODERS[self.dec_name](num_ch_enc=self.num_ch_enc, enc_sc=self.enc_sc, upsample_mode='nearest', use_skip=False, out_sc=self.out_scales, out_ch=3, out_act='sigmoid') def forward(self, x: Tensor) -> TensorDict: 'Image autoencoder forward pass.\n\n :param x: (Tensor) (b, 3, h, w) Input image.\n :return: {\n autoenc_feats: (list(Tensor)) Autoencoder encoder multi-scale features.\n autoenc_imgs: (TensorDict) (b, 1, h/2**s, w/2**s) Dict mapping from scales to image reconstructions.\n }\n ' feat = self.encoder(x) out = {'autoenc_feats': feat} k = 'autoenc_imgs' out[k] = self.decoder(feat) out[k] = {k2: out[k][k2] for k2 in sorted(out[k])} return out
class StructurePerception(nn.Module): 'Self-attention Structure Perception Module.' def forward(self, x): (b, c, h, w) = x.shape value = x.view(b, c, (- 1)) query = value key = value.permute(0, 2, 1) att = (query @ key) att = (att.max(dim=(- 1), keepdim=True)[0] - att) out = (att.softmax(dim=(- 1)) @ value) out = (x + out.view(b, c, h, w)) return out
class DetailEmphasis(nn.Module): 'Detail Emphasis Module.\n\n :param ch: (int) Number of input/output channels.\n ' def __init__(self, ch: int): super().__init__() self.conv = nn.Sequential(conv3x3(ch, ch), nn.BatchNorm2d(ch), nn.ReLU(inplace=True)) self.att = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(ch, ch, kernel_size=1, stride=1, padding=0), nn.ReLU(inplace=True), nn.Conv2d(ch, ch, kernel_size=1, stride=1, padding=0), nn.Sigmoid()) def forward(self, x): x = self.conv(x) x = (x + (x * self.att(x))) return x
class CADepthDecoder(nn.Module):
    """From CADepth (https://arxiv.org/abs/2112.13047)

    Monodepth-style decoder with a Structure Perception (channel self-attention) module at
    the bottleneck and a Detail Emphasis module before each skip-fusion conv.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self,
                 num_ch_enc: Sequence[int],
                 enc_sc: Sequence[int],
                 upsample_mode: str = 'nearest',
                 use_skip: bool = True,
                 out_sc: Sequence[int] = (0, 1, 2, 3),
                 out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]  # Decoder channels per scale (fine -> coarse).

        self.convs = OrderedDict()
        # Build top-down: scale 4 (coarsest) to scale 0 (finest).
        for i in range(4, (-1), (-1)):
            # First conv: reduce channels before upsampling.
            num_ch_in = (self.num_ch_enc[(-1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)

            # Second conv: fuse upsampled features with the encoder skip (if one matches 2**i).
            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)
            # Detail emphasis operates on the concatenated (pre-fusion) features.
            self.convs[f'detail_emphasis_{i}'] = DetailEmphasis(num_ch_in)

        for i in self.out_sc:
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)

        self.structure_perception = StructurePerception()
        # NOTE: `self.convs` is a plain dict; parameters are registered via this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]

    def forward(self, enc_features):
        """Decoder forward pass.

        :param enc_features: (Sequence[Tensor]) Multi-scale encoder features (fine -> coarse).
        :return: (dict[int, Tensor]) Map from each scale in `out_sc` to its activated prediction.
        """
        out = {}
        # Channel self-attention on the bottleneck features.
        x = self.structure_perception(enc_features[(-1)])
        for i in range(4, (-1), (-1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x += [enc_features[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'detail_emphasis_{i}'](x)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.activation(self.convs[f'outconv_{i}'](x))
        return out
def get_discrete_bins(n: int, mode: str = 'linear') -> Tensor:
    """Get the discretized disparity values depending on number of bins and quantization mode.

    All modes assume that we are quantizing sigmoid disparity, and therefore are in range [0, 1].
    Quantization modes:
        - linear: Evenly spaces out all bins.
        - exp: Spaces bins out exponentially, providing finer detail at low disparity values,
          i.e. higher depth values.

    :param n: (int) Number of bins to use.
    :param mode: (str) Quantization mode. {linear, exp}
    :return: (Tensor) (1, n, 1, 1) Computed discrete disparity bins.
    :raises ValueError: If `mode` is not a known quantization mode.
    """
    bins = torch.arange(n) / n  # Evenly spaced in [0, 1).
    if mode == 'linear':
        pass
    elif mode == 'exp':
        # FIX: `Tensor(200)` allocated an UNINITIALIZED tensor with 200 elements; the intent
        # is the scalar max depth 200, giving bins = max_depth ** (bins - 1).
        max_depth = torch.tensor(200.0)
        bins = torch.exp(torch.log(max_depth) * (bins - 1))
    else:
        raise ValueError(f'Invalid discretization mode. "{mode}"')
    return bins.view(1, n, 1, 1)
class SelfAttentionBlock(nn.Module):
    """Self-Attention Block.

    Uses 1x1 conv + ReLU projections for query, key and value; the attention matrix is
    computed between channels (b, c, c) over the flattened spatial positions.

    :param ch: (int) Number of input/output channels.
    """

    def __init__(self, ch):
        super().__init__()

        def project():
            """One projection head: pointwise conv followed by ReLU."""
            return nn.Sequential(nn.Conv2d(ch, ch, kernel_size=1, padding=0), nn.ReLU(inplace=True))

        self.query_conv = project()
        self.key_conv = project()
        self.value_conv = project()

    def forward(self, x):
        """Apply self-attention to `x`.

        :param x: (Tensor) (b, ch, h, w) Input features.
        :return: (Tensor) (b, ch, h, w) Attended features (no residual connection).
        """
        b, c, h, w = x.shape
        q = self.query_conv(x).flatten(-2, -1)                 # (b, c, h*w)
        k = self.key_conv(x).flatten(-2, -1).permute(0, 2, 1)  # (b, h*w, c)
        v = self.value_conv(x).flatten(-2, -1)                 # (b, c, h*w)
        weights = torch.bmm(q, k).softmax(dim=-1)              # (b, c, c) channel affinities.
        return torch.bmm(weights, v).view(b, c, h, w)
class DDVNetDecoder(nn.Module):
    """From DDVNet (https://arxiv.org/abs/2003.13951)

    Predicts a Discrete Disparity Volume: each output pixel gets `num_bins` logits over
    fixed disparity bins, converted to a continuous value via the softmax expectation.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self,
                 num_ch_enc: Sequence[int],
                 enc_sc: Sequence[int],
                 upsample_mode: str = 'nearest',
                 use_skip: bool = True,
                 out_sc: Sequence[int] = (0, 1, 2, 3),
                 out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.num_bins = 128  # Number of discrete disparity values per output channel.
        # Disparity value associated with each bin. NOTE: it is an `nn.Parameter`, so the
        # bin centres are trainable unless frozen externally.
        self.bins = nn.Parameter(get_discrete_bins(self.num_bins, mode='linear'))

        self.convs = OrderedDict()
        self.convs['att'] = SelfAttentionBlock(self.num_ch_enc[(-1)])
        for i in range(4, (-1), (-1)):
            # First conv: reduce channels before upsampling.
            num_ch_in = (self.num_ch_enc[(-1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)

            # Second conv: fuse upsampled features with the encoder skip (if present).
            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)

        for i in self.out_sc:
            # Each scale predicts `num_bins` logits for every output channel.
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], (self.num_bins * self.out_ch))

        # NOTE: `self.convs` is a plain dict; parameters are registered via this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]  # NOTE: unused by `forward`; outputs come from the expectation.
        # Cache of the raw per-scale logits from the most recent forward pass.
        self.logits = {}

    def expected_disparity(self, logits: Tensor) -> Tensor:
        """Maps discrete disparity logits into the expected weighted disparity.

        :param logits: (Tensor) (b, n, h, w) Raw unnormalized predicted probabilities.
        :return: (Tensor) (b, 1, h, w) Expected disparity map.
        """
        probs = logits.softmax(dim=1)
        disp = (probs * self.bins).sum(dim=1, keepdim=True)
        return disp

    def argmax_disparity(self, logits: Tensor) -> Tensor:
        """Maps discrete disparity logits into the single most-likely bin value.

        NOTE(review): argmax is non-differentiable; presumably inference-only — confirm usage.

        :param logits: (Tensor) (b, n, h, w) Raw unnormalized predicted probabilities.
        :return: (Tensor) (b, 1, h, w) Argmax disparity map.
        """
        idx = logits.argmax(dim=1)
        one_hot = F.one_hot(idx, self.num_bins).permute(0, 3, 1, 2)
        disp = (one_hot * self.bins).sum(dim=1, keepdim=True)
        return disp

    def forward(self, enc_features: Sequence[Tensor]) -> dict[int, Tensor]:
        """Decoder forward pass.

        :param enc_features: (Sequence[Tensor]) Multi-scale encoder features (fine -> coarse).
        :return: (dict[int, Tensor]) Map from each scale in `out_sc` to its expected disparity.
        """
        out = {}
        x = self.convs['att'](enc_features[(-1)])
        for i in range(4, (-1), (-1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x += [enc_features[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                logits = self.convs[f'outconv_{i}'](x)
                self.logits[i] = logits
                # One expectation per output channel: split the bins per channel and concat.
                out[i] = torch.cat([self.expected_disparity(l) for l in logits.chunk(self.out_ch, dim=1)], dim=1)
        return out
def upsample_block(in_ch: int, out_ch: int, upsample_mode: str = 'nearest') -> nn.Module:
    """Layer to upsample the input by a factor of 2 without skip connections.

    :param in_ch: (int) Number of input channels.
    :param out_ch: (int) Number of output channels.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :return: (nn.Module) conv_block -> 2x upsample -> conv_block.
    """
    stages = [
        conv_block(in_ch, out_ch),
        nn.Upsample(scale_factor=2, mode=upsample_mode),
        conv_block(out_ch, out_ch),
    ]
    return nn.Sequential(*stages)
class ChannelAttention(nn.Module):
    """Channel Attention Module incorporating Squeeze & Excitation.

    :param in_ch: (int) Number of input channels.
    :param ratio: (int) Channel reduction ratio in the bottleneck.
    """

    def __init__(self, in_ch: int, ratio: int = 16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_ch, (in_ch // ratio), bias=False),
            nn.ReLU(inplace=True),
            nn.Linear((in_ch // ratio), in_ch, bias=False),
        )
        self.init_weights()

    def init_weights(self):
        """Kaiming weight initialization.

        NOTE(review): this only targets `nn.Conv2d`, but the module contains only `nn.Linear`
        layers, so it is currently a no-op. Kept as-is to avoid silently changing
        initialization; confirm intent before extending it to `nn.Linear`.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        """Re-weight the channels of `x` with a squeeze-and-excitation gate.

        :param x: (Tensor) (b, in_ch, h, w) Input features.
        :return: (Tensor) (b, in_ch, h, w) Channel-reweighted features.
        """
        att = self.avg_pool(x)
        # FIX: `flatten(1)` -> (b, in_ch) instead of `squeeze()`, which also dropped the
        # channel/batch dims when they were 1 (crashing the Linear for in_ch == 1).
        att = self.fc(att.flatten(1)).sigmoid()
        return x * att[..., None, None]
class AttentionBlock(nn.Module):
    """Attention Block incorporating channel attention.

    Upsamples the decoder features, concatenates the skip connection, then applies channel
    attention followed by a 3x3 conv + ReLU.

    :param in_ch: (int) Number of input channels.
    :param skip_ch: (int) Number of channels in skip connection features.
    :param out_ch: (Optional[int]) Number of output channels. Defaults to `in_ch`.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    """

    def __init__(self, in_ch: int, skip_ch: int, out_ch: Optional[int] = None, upsample_mode: str = 'nearest'):
        super().__init__()
        self.in_ch = in_ch + skip_ch
        self.out_ch = out_ch or in_ch
        self.upsample_mode = upsample_mode
        self.layers = nn.Sequential(
            ChannelAttention(self.in_ch),
            conv3x3(self.in_ch, self.out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x, x_skip):
        """Fuse upsampled decoder features with their skip connection.

        :param x: (Tensor) (b, in_ch, h, w) Decoder features.
        :param x_skip: (Tensor) (b, skip_ch, 2h, 2w) Encoder skip features.
        :return: (Tensor) (b, out_ch, 2h, 2w) Fused features.
        """
        up = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        fused = torch.cat((up, x_skip), dim=1)
        return self.layers(fused)
class DiffNetDecoder(nn.Module):
    """From DiffNet (https://arxiv.org/abs/2110.09482)

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self,
                 num_ch_enc: Sequence[int],
                 enc_sc: Sequence[int],
                 upsample_mode: str = 'nearest',
                 use_skip: bool = True,
                 out_sc: Sequence[int] = (0, 1, 2, 3),
                 out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]  # Decoder channels per scale (fine -> coarse).

        self.convs = nn.ModuleDict()
        for i in range(4, (-1), (-1)):
            num_ch_in = (self.num_ch_enc[(-1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                # Skip available: fuse it with channel attention.
                idx = self.enc_sc.index(scale_factor)
                num_ch_skip = self.num_ch_enc[idx]
                self.convs[f'upconv_{i}'] = AttentionBlock(num_ch_in, num_ch_skip, num_ch_out, self.upsample_mode)
            else:
                # No skip: plain upsampling block.
                self.convs[f'upconv_{i}'] = upsample_block(num_ch_in, num_ch_out, self.upsample_mode)

        # NOTE(review): output convs are built for scales 0-3 regardless of `out_sc`;
        # an `out_sc` containing 4 would raise a KeyError in `forward` — confirm intent.
        for i in range(4):
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)

        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.activation = ACT[self.out_act]

    def forward(self, enc_features):
        """Decoder forward pass.

        :param enc_features: (Sequence[Tensor]) Multi-scale encoder features (fine -> coarse).
        :return: (dict[int, Tensor]) Map from each scale in `out_sc` to its activated prediction.
        """
        out = {}
        x = enc_features[(-1)]
        for i in range(4, (-1), (-1)):
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                x = self.convs[f'upconv_{i}'](x, enc_features[idx])
            else:
                x = self.convs[f'upconv_{i}'](x)
            if (i in self.out_sc):
                out[i] = self.activation(self.convs[f'outconv_{i}'](x))
        return out
class FSEBlock(nn.Module):
    """Feature Squeeze-and-Excitation block.

    Upsamples the decoder features, concatenates one or more skip connections, re-weights the
    channels of the fused features with an SE gate, and reduces them with a 1x1 conv + ReLU.

    :param in_ch: (int) Number of input channels.
    :param skip_ch: (int) Total number of channels across all skip connections.
    :param out_ch: (Optional[int]) Number of output channels. Defaults to `in_ch`.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    """

    def __init__(self, in_ch: int, skip_ch: int, out_ch: Optional[int] = None, upsample_mode: str = 'nearest'):
        super().__init__()
        self.in_ch = in_ch + skip_ch
        self.out_ch = out_ch or in_ch
        self.upsample_mode = upsample_mode
        self.reduction = 16  # SE bottleneck channel reduction ratio.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.se = nn.Sequential(
            nn.Linear(self.in_ch, (self.in_ch // self.reduction), bias=False),
            nn.ReLU(inplace=True),
            nn.Linear((self.in_ch // self.reduction), self.in_ch, bias=False),
        )
        self.conv = nn.Sequential(conv1x1(self.in_ch, self.out_ch, bias=True), nn.ReLU(inplace=True))

    def forward(self, x: Tensor, xs_skip: Sequence[Tensor]) -> Tensor:
        """Fuse upsampled features with skips and apply SE channel re-weighting.

        :param x: (Tensor) (b, in_ch, h, w) Decoder features.
        :param xs_skip: (Sequence[Tensor]) Skip features at resolution (2h, 2w).
        :return: (Tensor) (b, out_ch, 2h, 2w) Fused features.
        """
        x = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        x = torch.cat([x, *xs_skip], dim=1)
        # FIX: `flatten(1)` keeps the batch dim; the previous `squeeze()` dropped it for
        # b == 1 and broke entirely for single-channel inputs.
        y = self.avg_pool(x).flatten(1)
        y = self.se(y).sigmoid()
        y = y[..., None, None].expand_as(x)
        return self.conv(x * y)
class HRDepthDecoder(nn.Module):
    """From HRDepth (https://arxiv.org/pdf/2012.07356.pdf)

    Dense-skip decoder arranged as a grid of nodes `{row}{col}` (row = scale, col = depth in
    the grid). Attention nodes fuse skips with `FSEBlock`; the rest use plain conv fusion.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) Must be `True`: HRDepth requires skip connections.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self,
                 num_ch_enc: Sequence[int],
                 enc_sc: Sequence[int],
                 upsample_mode: str = 'nearest',
                 use_skip: bool = True,
                 out_sc: Sequence[int] = (0, 1, 2, 3),
                 out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act

        if (not self.use_skip):
            raise ValueError('HRDepth decoder must use skip connections.')

        if (len(self.enc_sc) == 4):
            # 4-stage backbones: synthesize a 5th, finer stage by reusing the first stage
            # at half its downsampling factor (the actual upsampling happens in `forward`).
            warnings.warn('HRDepth requires 5 scales, but the provided backbone has only 4. The first scale will be duplicated and upsampled!')
            self.enc_sc = ([(self.enc_sc[0] // 2)] + self.enc_sc)
            self.num_ch_enc = ([self.num_ch_enc[0]] + self.num_ch_enc)

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.activation = ACT[self.out_act]

        # Decoder widths: half of each encoder stage, plus an extra (even finer) level.
        self.num_ch_dec = [(ch // 2) for ch in self.num_ch_enc[1:]]
        self.num_ch_dec = ([(self.num_ch_dec[0] // 2)] + self.num_ch_dec)

        # Grid nodes 'rc' (row=scale, col=grid depth), listed in computation order.
        self.all_idx = ['01', '11', '21', '31', '02', '12', '22', '03', '13', '04']
        self.att_idx = ['31', '22', '13', '04']  # Nodes fused via FSE attention.
        self.non_att_idx = ['01', '11', '21', '02', '12', '03']  # Plain conv fusion nodes.

        self.convs = nn.ModuleDict()
        # Per-node channel-reduction convs ('{i}{j}_conv_0'), with special handling of the
        # finest row (i == 0) whose width shrinks as the grid deepens.
        for j in range(5):
            for i in range((5 - j)):
                ch_in = self.num_ch_enc[i]
                if ((i == 0) and (j != 0)):
                    ch_in //= 2
                if ((i == 0) and (j == 4)):
                    ch_in = (self.num_ch_enc[(i + 1)] // 2)
                ch_out = (ch_in // 2)
                self.convs[f'{i}{j}_conv_0'] = conv_block(ch_in, ch_out)
                if ((i == 0) and (j == 4)):
                    # The final node gets a second conv down to the finest decoder width.
                    ch_in = ch_out
                    ch_out = self.num_ch_dec[i]
                    self.convs[f'{i}{j}_conv_1'] = conv_block(ch_in, ch_out)

        for idx in self.att_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            # Skip channels: the encoder stage plus all previously-computed same-row nodes.
            self.convs[f'{idx}_att'] = FSEBlock(in_ch=(self.num_ch_enc[(row + 1)] // 2),
                                                skip_ch=(self.num_ch_enc[row] + (self.num_ch_dec[(row + 1)] * (col - 1))),
                                                upsample_mode=self.upsample_mode)

        for idx in self.non_att_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            if (col == 1):
                self.convs[f'{(row + 1)}{(col - 1)}_conv_1'] = conv_block(
                    in_ch=((self.num_ch_enc[(row + 1)] // 2) + self.num_ch_enc[row]),
                    out_ch=self.num_ch_dec[(row + 1)])
            else:
                # Deeper columns first shrink the concatenated features with a 1x1 conv.
                self.convs[f'{idx}_down'] = conv1x1(
                    in_ch=(((self.num_ch_enc[(row + 1)] // 2) + self.num_ch_enc[row]) + (self.num_ch_dec[(row + 1)] * (col - 1))),
                    out_ch=(2 * self.num_ch_dec[(row + 1)]), bias=False)
                self.convs[f'{(row + 1)}{(col - 1)}_conv_1'] = conv_block(
                    in_ch=(2 * self.num_ch_dec[(row + 1)]),
                    out_ch=self.num_ch_dec[(row + 1)])

        channels = self.num_ch_dec
        for (i, c) in enumerate(channels):
            if (i in self.out_sc):
                self.convs[f'outconv_{i}'] = nn.Sequential(conv3x3(c, self.out_ch), self.activation)

        self.decoder = nn.ModuleList(list(self.convs.values()))

    def nested_conv(self, convs: Sequence[nn.Module], x: Tensor, xs_skip: Sequence[Tensor]) -> Tensor:
        """Fuse one non-attention grid node: conv_0 -> upsample -> concat skips -> (down) -> conv_1.

        :param convs: (Sequence[nn.Module]) [conv_0, conv_1] or [conv_0, conv_1, down].
        :param x: (Tensor) Coarser-node features.
        :param xs_skip: (Sequence[Tensor]) Same-row features to concatenate.
        :return: (Tensor) Fused node features.
        """
        x = F.interpolate(convs[0](x), scale_factor=2, mode=self.upsample_mode)
        x = torch.cat([x, *xs_skip], dim=1)
        if (len(convs) == 3):
            x = convs[2](x)  # Optional 1x1 '{idx}_down' reduction.
        x = convs[1](x)
        return x

    def forward(self, enc_features: Sequence[Tensor]) -> dict[int, Tensor]:
        """Decoder forward pass.

        :param enc_features: (Sequence[Tensor]) Multi-scale encoder features (4 or 5 stages).
        :return: (dict[int, Tensor]) Map from each scale in `out_sc` to its activated prediction.
        """
        if (len(enc_features) == 4):
            # Synthesize the extra finest stage promised in `__init__`.
            enc_features = ([F.interpolate(enc_features[0], scale_factor=2, mode=self.upsample_mode)] + enc_features)
        # Column 0 of the grid is the (possibly padded) encoder pyramid itself.
        feat = {f'{i}0': f for (i, f) in enumerate(enc_features)}

        for idx in self.all_idx:
            (row, col) = (int(idx[0]), int(idx[1]))
            # Skips: every previously-computed node in the same row.
            xs_skip = [feat[f'{row}{i}'] for i in range(col)]
            if (idx in self.att_idx):
                feat[f'{idx}'] = self.convs[f'{idx}_att'](self.convs[f'{(row + 1)}{(col - 1)}_conv_0'](feat[f'{(row + 1)}{(col - 1)}']), xs_skip)
            elif (idx in self.non_att_idx):
                conv = [self.convs[f'{(row + 1)}{(col - 1)}_conv_0'], self.convs[f'{(row + 1)}{(col - 1)}_conv_1']]
                if (col != 1):
                    conv.append(self.convs[f'{idx}_down'])
                feat[f'{idx}'] = self.nested_conv(conv, feat[f'{(row + 1)}{(col - 1)}'], xs_skip)

        # Finest prediction: extra conv + upsample on top of node '04'.
        x = feat['04']
        x = self.convs['04_conv_0'](x)
        x = self.convs['04_conv_1'](F.interpolate(x, scale_factor=2, mode=self.upsample_mode))
        out_feat = [x, feat['04'], feat['13'], feat['22']]
        out = {i: self.convs[f'outconv_{i}'](f) for (i, f) in enumerate(out_feat) if (i in self.out_sc)}
        return out
def main():
    """Smoke test: run `HRDepthDecoder` on random encoder features and print output shapes."""
    num_enc_ch = [64, 64, 128, 256, 512]
    enc_sc = [2, 4, 8, 16, 32]
    b, h, w = 4, 256, 512

    # Fake encoder pyramid: one random feature map per stage.
    enc_features = [torch.rand((b, c, h // s, w // s)) for s, c in zip(enc_sc, num_enc_ch)]

    net = HRDepthDecoder(num_ch_enc=num_enc_ch, enc_sc=enc_sc, out_sc=range(4), out_ch=1)
    out = net(enc_features)
    for key, val in out.items():
        print(key, val.shape)
class MonodepthDecoder(nn.Module):
    """From Monodepth(2) (https://arxiv.org/abs/1806.01260)

    Generic convolutional decoder incorporating multi-scale predictions and skip connections.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self,
                 num_ch_enc: Sequence[int],
                 enc_sc: Sequence[int],
                 upsample_mode: str = 'nearest',
                 use_skip: bool = True,
                 out_sc: Sequence[int] = (0, 1, 2, 3),
                 out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')

        self.num_ch_dec = [16, 32, 64, 128, 256]  # Decoder channels per scale (fine -> coarse).

        self.convs = OrderedDict()
        # Build top-down: scale 4 (coarsest) to scale 0 (finest).
        for i in range(4, (-1), (-1)):
            # First conv: reduce channels before upsampling.
            num_ch_in = (self.num_ch_enc[(-1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = conv_block(num_ch_in, num_ch_out)

            # Second conv: fuse upsampled features with the encoder skip (if one matches 2**i).
            num_ch_in = self.num_ch_dec[i]
            sf = (2 ** i)
            if (self.use_skip and (sf in self.enc_sc)):
                idx = self.enc_sc.index(sf)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)

        for i in self.out_sc:
            self.convs[f'outconv_{i}'] = conv3x3(self.num_ch_dec[i], self.out_ch)

        # NOTE: `self.convs` is a plain dict; parameters are registered via this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.act = ACT[self.out_act]

    def forward(self, enc_feat: Sequence[Tensor]) -> TensorDict:
        """Decoder forward pass.

        :param enc_feat: (Sequence[Tensor]) Multi-scale encoder features (fine -> coarse).
        :return: (TensorDict) Map from each scale in `out_sc` to its activated prediction.
        """
        out = {}
        x = enc_feat[(-1)]
        for i in range(4, (-1), (-1)):
            x = self.convs[f'upconv_{i}_{0}'](x)
            x = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            sf = (2 ** i)
            if (self.use_skip and (sf in self.enc_sc)):
                idx = self.enc_sc.index(sf)
                x += [enc_feat[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.act(self.convs[f'outconv_{i}'](x))
        return out
class SubPixelConv(nn.Module):
    """Sub-pixel convolution upsampling layer (depthwise conv + pixel shuffle).

    The depthwise conv expands each input channel to `up_factor**2` channels, which
    `nn.PixelShuffle` rearranges into a spatially upsampled map with the original
    channel count.

    :param ch_in: (int) Number of input (and output) channels.
    :param up_factor: (int) Spatial upsampling factor.
    """

    def __init__(self, ch_in: int, up_factor: int):
        super().__init__()
        self.up_factor = up_factor
        ch_out = ch_in * up_factor ** 2
        self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=(3, 3), groups=ch_in, padding=1)
        self.shuffle = nn.PixelShuffle(up_factor)
        self.init_weights()

    def init_weights(self):
        """Initialize so every sub-pixel position starts from the same filter.

        Each group of `up_factor**2` output channels is a copy of a single conv filter,
        so the initial output is a nearest-neighbour-style upsample of one response.
        """
        nn.init.zeros_(self.conv.bias)
        # FIX: the replication factor was hard-coded to 4 (only valid for up_factor=2);
        # use `up_factor**2` so other factors (e.g. 4 or 8, as used by SuperdepthDecoder)
        # produce correctly shaped weights instead of crashing at forward time.
        n = self.up_factor ** 2
        self.conv.weight = nn.Parameter(self.conv.weight[::n].repeat_interleave(n, 0))

    def forward(self, x):
        """:param x: (Tensor) (b, ch_in, h, w) -> (Tensor) (b, ch_in, h*up_factor, w*up_factor)."""
        return self.shuffle(self.conv(x))
class SuperdepthDecoder(nn.Module):
    """From SuperDepth (https://arxiv.org/abs/1806.01260)

    NOTE(review): the URL above appears copied from the Monodepth2 decoder; SuperDepth is a
    different paper — confirm the intended reference.

    Monodepth-style decoder where upsampling is done with sub-pixel convolutions, and each
    multi-scale output is itself sub-pixel upsampled back towards full resolution.

    :param num_ch_enc: (Sequence[int]) List of channels per encoder stage.
    :param enc_sc: (Sequence[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (Sequence[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self,
                 num_ch_enc: Sequence[int],
                 enc_sc: Sequence[int],
                 upsample_mode: str = 'nearest',
                 use_skip: bool = True,
                 out_sc: Sequence[int] = (0, 1, 2, 3),
                 out_ch: int = 1,
                 out_act: str = 'sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act

        if (self.out_act not in ACT):
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.activation = ACT[self.out_act]

        self.num_ch_dec = [16, 32, 64, 128, 256]  # Decoder channels per scale (fine -> coarse).

        self.convs = OrderedDict()
        for i in range(4, (-1), (-1)):
            # First stage: conv then learned 2x sub-pixel upsampling (instead of interpolation).
            num_ch_in = (self.num_ch_enc[(-1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{0}'] = nn.Sequential(conv_block(num_ch_in, num_ch_out),
                                                          SubPixelConv(num_ch_out, up_factor=2),
                                                          nn.ReLU(inplace=True))

            # Second conv: fuse upsampled features with the encoder skip (if one matches 2**i).
            num_ch_in = self.num_ch_dec[i]
            scale_factor = (2 ** i)
            if (self.use_skip and (scale_factor in self.enc_sc)):
                idx = self.enc_sc.index(scale_factor)
                num_ch_in += self.num_ch_enc[idx]
            num_ch_out = self.num_ch_dec[i]
            self.convs[f'upconv_{i}_{1}'] = conv_block(num_ch_in, num_ch_out)

        for i in self.out_sc:
            if (i == 0):
                # Full resolution already: plain conv + activation.
                self.convs[f'outconv_{i}'] = nn.Sequential(conv3x3(self.num_ch_dec[i], self.out_ch), self.activation)
            else:
                # Coarser scales: sub-pixel upsample the prediction by 2**i.
                self.convs[f'outconv_{i}'] = nn.Sequential(conv_block(self.num_ch_dec[i], self.out_ch),
                                                           SubPixelConv(self.out_ch, up_factor=(2 ** i)),
                                                           self.activation)

        # NOTE: `self.convs` is a plain dict; parameters are registered via this ModuleList.
        self.decoder = nn.ModuleList(list(self.convs.values()))

    def forward(self, feat: Sequence[Tensor]) -> dict[int, Tensor]:
        """Decoder forward pass.

        :param feat: (Sequence[Tensor]) Multi-scale encoder features (fine -> coarse).
        :return: (dict[int, Tensor]) Map from each scale in `out_sc` to its activated prediction.
        """
        out = {}
        x = feat[(-1)]
        for i in range(4, (-1), (-1)):
            x = [self.convs[f'upconv_{i}_{0}'](x)]
            sf = (2 ** i)
            if (self.use_skip and (sf in self.enc_sc)):
                idx = self.enc_sc.index(sf)
                x += [feat[idx]]
            x = torch.cat(x, 1)
            x = self.convs[f'upconv_{i}_{1}'](x)
            if (i in self.out_sc):
                out[i] = self.convs[f'outconv_{i}'](x)
        return out
def conv1x1(in_ch: int, out_ch: int, bias: bool = True) -> nn.Conv2d:
    """Create a 1x1 (pointwise) convolution.

    :param in_ch: (int) Number of input channels.
    :param out_ch: (int) Number of output channels.
    :param bias: (bool) If `True`, add a learnable bias.
    :return: (nn.Conv2d) The convolution layer.
    """
    return nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=(1, 1), bias=bias)
def conv3x3(in_ch: int, out_ch: int, bias: bool = True) -> nn.Conv2d:
    """Create a reflect-padded 3x3 convolution (spatial size preserved).

    :param in_ch: (int) Number of input channels.
    :param out_ch: (int) Number of output channels.
    :param bias: (bool) If `True`, add a learnable bias.
    :return: (nn.Conv2d) The convolution layer.
    """
    return nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=(3, 3),
                     padding=1, padding_mode='reflect', bias=bias)
def conv_block(in_ch: int, out_ch: int) -> nn.Module:
    """Create a padded 3x3 convolution followed by an ELU activation.

    Submodules are named 'conv' and 'act' (the names are part of the state_dict layout).

    :param in_ch: (int) Number of input channels.
    :param out_ch: (int) Number of output channels.
    :return: (nn.Module) conv3x3 -> ELU.
    """
    layers = OrderedDict()
    layers['conv'] = conv3x3(in_ch, out_ch)
    layers['act'] = nn.ELU(inplace=True)
    return nn.Sequential(layers)
def _load_roots():
    """Helper to load the additional model & data roots from the repo config.

    Reads `PATHS.yaml` at the repo root; warns and returns empty lists when it is missing.

    :return: (tuple[list[Path], list[Path]]) Model roots and data roots.
    """
    file = REPO_ROOT / 'PATHS.yaml'
    if not file.is_file():
        # Config missing: fall back to empty roots so lookups simply find nothing.
        warnings.warn(_msg.format(file=file))
        return [], []

    paths = load_yaml(file)
    model_roots = [Path(root) for root in paths['MODEL_ROOTS']]
    data_roots = [Path(root) for root in paths['DATA_ROOTS']]
    return model_roots, data_roots
def _build_paths(names: dict[(str, str)], roots: list[Path]): 'Helper to build the paths from a list of possible `roots`.\n NOTE: This returns the FIRST found path given by the order of roots. I.e. ordered by priority.\n ' paths = {} for (k, v) in names.items(): try: paths[k] = next((p for r in roots if (p := (r / v)).exists())) print(f'Found path "{k}": {paths[k]}') except StopIteration: warnings.warn(f'No valid path found for "{k}"!') return paths
def find_model_file(name: str) -> Path:
    """Helper to find a model file in the available roots.

    :param name: (str) Path to an existing file, or a name relative to `MODEL_ROOTS`.
    :return: (Path) The first existing file, ordered by root priority.
    :raises FileNotFoundError: If no root contains `name`.
    """
    direct = Path(name)
    if direct.is_file():
        return direct  # Already a valid file path; no search needed.

    for root in MODEL_ROOTS:
        if (candidate := root / name).is_file():
            return candidate
    raise FileNotFoundError(f'No valid path found for {name} in {MODEL_ROOTS}...')
def find_data_dir(name: str) -> Path:
    """Helper to find a dataset directory in the available roots.

    :param name: (str) Path to an existing directory, or a name relative to `DATA_ROOTS`.
    :return: (Path) The first existing directory, ordered by root priority.
    :raises FileNotFoundError: If no root contains `name`.
    """
    if (p := Path(name)).is_dir():
        return p
    try:
        # FIX: search for directories (`is_dir`), not files -- the previous `is_file` check
        # could never match a dataset directory inside the roots (cf. `find_model_file`).
        return next((p for r in DATA_ROOTS if (p := (r / name)).is_dir()))
    except StopIteration:
        raise FileNotFoundError(f'No valid path found for {name} in {DATA_ROOTS}...')
def register(name: Union[str, Sequence[str]], type: Optional[str] = None, overwrite: bool = False) -> Callable:
    """Class decorator to build a registry of networks, losses & data available during training.

    NOTE: the parameter `type` shadows the builtin within this function.

    :param name: (str|Sequence[str]) Key(s) to access class in the registry.
    :param type: (None|str) Registry to use. If `None`, guess from class name. {None, 'net', 'loss', 'data'}
    :param overwrite: (bool) If `True`, overwrite class `name` in registry `type`.
    :return: (Callable) The class decorator.
    """
    def get_type(cls):
        """Helper to identify registry `type` from class name (suffix match against `_NAME2TYPE`)."""
        try:
            return next((v for (k, v) in _NAME2TYPE.items() if cls.__name__.endswith(k)))
        except StopIteration:
            raise ValueError(f'Class matched no valid patterns. ("{cls.__name__}" vs. {set(_NAME2TYPE)})')

    def wrapper(cls):
        """Decorator adding `cls` to the specified registry."""
        # Classes defined in `__main__` would register under a different module path than
        # when imported normally; skip them to avoid duplicate/ambiguous entries.
        if (cls.__module__ == '__main__'):
            warnings.warn(f'Ignoring class "{cls.__name__}" created in the "__main__" module.')
            return cls

        ns = ((name,) if isinstance(name, str) else name)  # Normalize to a tuple of keys.
        t = (type or get_type(cls))
        if (t not in _REG):
            raise TypeError(f'Invalid `type`. ("{t}" vs. {set(_REG)})')

        reg = _REG[t]
        for n in ns:
            # Guard against accidental double-registration under the same key.
            if ((not overwrite) and (tgt := reg.get(n))):
                raise ValueError(f'"{n}" already in "{t}" registry ({tgt} vs. {cls}). Set `overwrite=True` to overwrite it.')
            reg[n] = cls
        return cls
    return wrapper
@register('disp_mask')
class MaskReg(nn.Module):
    """Class implementing photometric loss masking regularization.
    From SfM-Learner (https://arxiv.org/abs/1704.07813)

    Based on the `explainability` mask, which predicts a weighting factor for each pixel in
    the photometric loss. To avoid the degenerate solution where all pixels are ignored, this
    regularization pushes all values towards 1 using binary cross-entropy.
    """

    def forward(self, x: Tensor) -> LossData:
        """Mask regularization forward pass.

        :param x: (Tensor) (*) Input sigmoid explainability mask.
        :return: {
            loss: (Tensor) (,) Computed loss.
            loss_dict: (TensorDict) {}.
        }
        """
        target = torch.ones_like(x)  # Push every mask value towards 1.
        return F.binary_cross_entropy(x, target), {}
@register('disp_occ')
class OccReg(nn.Module):
    """Class implementing disparity occlusion regularization.
    From DVSO (https://arxiv.org/abs/1807.02570)

    Penalizes the mean disparity of the image, encouraging the network to select background
    disparities.

    NOTE: In this case we CANNOT apply mean normalization to the input disparity. By definition,
    this fixes the mean of all elements to 1, meaning the loss is impossible to minimize.

    NOTE: The benefits of applying this regularization to purely monocular supervision are
    unclear, since the loss could simply be optimized by making all disparities smaller.

    :param invert: (bool) If `True`, encourage foreground disparities instead of background.
    """

    def __init__(self, invert: bool = False):
        super().__init__()
        self.invert = invert
        # Non-trainable sign: -1 rewards large (foreground) disparity, +1 penalizes it.
        sign = -1 if self.invert else 1
        self._sign = nn.Parameter(torch.tensor(sign), requires_grad=False)

    def forward(self, x: Tensor) -> LossData:
        """Occlusion regularization forward pass.

        :param x: (Tensor) (*) Input sigmoid disparities.
        :return: {
            loss: (Tensor) (,) Computed loss.
            loss_dict: (TensorDict) {}.
        }
        """
        return self._sign * x.mean(), {}
def get_device(device: Optional[Union[str, torch.device]] = None, /) -> torch.device:
    """Create torch device from str or device. Defaults to CUDA if available.

    :param device: (None|str|torch.device) Device spec; passed through if already a device.
    :return: (torch.device) Resolved device.
    """
    if isinstance(device, torch.device):
        return device
    fallback = 'cuda' if torch.cuda.is_available() else 'cpu'
    # `or` keeps the original behavior for falsy inputs (None or empty string).
    return torch.device(device or fallback)
def get_latest_ckpt(path: PathLike, ignore: Sequence[str] = None, reverse: bool = False, suffix: str = '.ckpt') -> Optional[Path]:
    """Return latest or earliest checkpoint in the directory. Assumes files can be sorted in a meaningful way.

    :param path: (PathLike) Directory to search in.
    :param ignore: (Sequence[str]) Filenames to ignore, e.g. corrupted?
    :param reverse: (bool) If `True`, return earliest checkpoint.
    :param suffix: (str) Expected checkpoint file extension.
    :return: (Path) Latest checkpoint file or `None`.
    """
    path = Path(path)
    ignore = ignore or []

    # 'last<suffix>' acts as an explicit pointer to the newest checkpoint.
    # NOTE(review): this guard checks for the bare string 'last' in `ignore`, while the loop
    # below compares full filenames (e.g. 'last.ckpt') -- confirm which form is intended.
    if 'last' not in ignore:
        last_file = path / ('last' + suffix)
        if last_file.is_file():
            return last_file

    # Lexicographic sort; descending for "latest", ascending for "earliest".
    for f in sorted(path.iterdir(), reverse=(not reverse)):
        if f.suffix == suffix and f.name not in ignore:
            return f
    return None
def eps(x: Optional[torch.Tensor] = None, /) -> float:
    """Return the `eps` value for the given `input` dtype. (default=float32 ~= 1.19e-7)

    :param x: (Optional[Tensor]) Tensor whose dtype determines the epsilon; `None` -> float32.
    :return: (float) Machine epsilon for the dtype.
    """
    dtype = x.dtype if x is not None else torch.float32
    return torch.finfo(dtype).eps
def freeze(net: nn.Module, /) -> nn.Module:
    """Fix all model parameters and prevent training.

    :param net: (nn.Module) Network to freeze (modified in-place).
    :return: (nn.Module) The same network, for chaining.
    """
    for param in net.parameters():
        param.requires_grad_(False)
    return net
def unfreeze(net: nn.Module, /) -> nn.Module:
    """Make all model parameters trainable.

    :param net: (nn.Module) Network to unfreeze (modified in-place).
    :return: (nn.Module) The same network, for chaining.
    """
    for param in net.parameters():
        param.requires_grad_(True)
    return net
def allclose(net1: nn.Module, net2: nn.Module, /) -> bool:
    """Check if two networks are equal (parameter-wise).

    NOTE: shadows the `torch.allclose` name; parameters are compared pairwise in declaration
    order, and a shape/dtype mismatch counts as "not equal" rather than raising.
    """
    for p1, p2 in zip(net1.parameters(), net2.parameters()):
        try:
            equal = p1.allclose(p2)
        except RuntimeError:
            return False  # Incompatible shapes/dtypes -> definitely not equal.
        if not equal:
            return False
    return True
def num_parameters(net: nn.Module, /) -> int:
    """Get number of trainable parameters in a network.

    :param net: (nn.Module) Network to count.
    :return: (int) Total element count over parameters with `requires_grad=True`.
    """
    total = 0
    for param in net.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
@map_container
def to_torch(x: Any, /, permute: bool = True, device: Optional[torch.device] = None) -> Any:
    """Convert given input to torch.Tensors

    :param x: (Any) Arbitrary structure to convert to tensors (see `map_apply`).
    :param permute: (bool) If `True`, permute to PyTorch convention (b, h, w, c) -> (b, c, h, w).
    :param device: (torch.device) Device to send tensors to.
    :return: (Any) Input structure, converted to tensors.
    """
    # Objects that should never become tensors are passed through untouched.
    if isinstance(x, (str, Timer, MultiLevelTimer)):
        return x

    t = torch.as_tensor(x, device=device)
    if permute and t.ndim > 2:
        # Channels-last -> channels-first, keeping any leading batch dims in place.
        order = list(range(t.ndim - 3)) + [-1, -3, -2]
        t = t.permute(order)
    return t