code stringlengths 17 6.64M |
|---|
def save(file: Path, **kwargs) -> None:
    """Save the given keyword arrays into a compressed `.npz` archive at `file`."""
    msg = f'\n-> Saving to "{file}"...'
    print(msg)
    np.savez_compressed(file, **kwargs)
|
def export_sintel(mode, save_stem: str=None, overwrite: bool=False) -> None:
    """Export the ground-truth synthetic depth images for Sintel.

    :param mode: (str) Split mode to use.
    :param save_stem: (str) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for Sintel '{mode}'...")
    ds = SintelDataset(mode=mode, as_torch=False)
    save_file = ds.split_file.parent / f'{save_stem}.npz'
    if save_file.is_file() and not overwrite:
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")

    depths, Ks = [], []
    for _, y, _ in tqdm(ds):  # (image, labels, meta); only the labels are needed
        depths.append(y['depth'].squeeze())
        Ks.append(y['K'])
    save(save_file, depth=np.array(depths), K=np.array(Ks))
|
def save(file: Path, **kwargs) -> None:
    """Write the supplied keyword arrays to a compressed npz archive."""
    print('\n' + f"-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
|
def export_tum(mode: str, save_stem: str, overwrite: bool=False) -> None:
    """Export the ground-truth depth maps for TUM.

    :param mode: (str) Split mode to use.
    :param save_stem: (str) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for TUM '{mode}'...")
    ds = TumDataset(mode=mode, as_torch=False)
    save_file = ds.split_file.parent / f'{save_stem}.npz'
    if save_file.is_file() and not overwrite:
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")

    all_depths = []
    for batch in tqdm(ds):  # batch = (image, labels, meta)
        all_depths.append(batch[1]['depth'].squeeze())
    save(save_file, depth=np.array(all_depths))
|
def process_dataset(src_dir: Path, dst_dir: Path, use_hints: bool=True, use_benchmark: bool=True, overwrite: bool=False) -> None:
    """Process the entire Kitti Raw Sync dataset.

    Converts the base sequences plus, optionally, the depth-hints and
    depth-benchmark auxiliary directories.
    """
    HINTS_DIR, BENCHMARK_DIR = 'depth_hints', 'depth_benchmark'

    # Copy split files only once.
    splits_dst = dst_dir / 'splits'
    if not splits_dst.is_dir():
        shutil.copytree(src_dir / 'splits', splits_dst)

    for seq in kr.SEQS:
        export_calibration(src_dir / seq, dst_dir / seq, overwrite)
        process_sequence(src_dir / seq, dst_dir / seq, overwrite)

    # Optional auxiliary directories share the same per-scene processing.
    extras = []
    if use_hints:
        extras.append(HINTS_DIR)
    if use_benchmark:
        extras.append(BENCHMARK_DIR)

    for name in extras:
        for src_scene in sorted((src_dir / name).iterdir()):
            process_sequence(src_scene, (dst_dir / name) / src_scene.name, overwrite)
|
def process_sequence(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26."""
    print(f"-> Processing sequence '{src_dir}'")
    # Only subdirectories are drives; loose files at this level are skipped.
    drives = (p for p in sorted(src_dir.iterdir()) if not p.is_file())
    for drive in drives:
        process_drive(drive, dst_dir / drive.name, overwrite)
|
def process_drive(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26/2011_09_26_drive_0005."""
    print(f"  -> Processing drive '{src_dir}'")
    for child in sorted(src_dir.iterdir()):
        process_dir(child, dst_dir / child.name, overwrite)
|
def process_dir(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Processes a data directory within a given drive.

    Cases:
    - Base dataset: images_00, images_01, velodyne_points, oxts (/data & /timestamps for each)
    - Depth hints: images_02, images_03
    - Depth benchmark: groundtruth/image_02, groundtruth/image_03

    :param src_dir: (Path) Source data directory within a drive.
    :param dst_dir: (Path) Mirror destination directory.
    :param overwrite: (bool) If `True`, re-export directories that already exist.
    """
    print(f"  -> Processing dir '{src_dir}'")
    # Depth hints store raw `.npy` arrays directly in the directory.
    if ('depth_hints' in str(src_dir)):
        if ((not overwrite) and dst_dir.is_dir()):
            print(f"  -> Skipping dir '{dst_dir}'")
            return
        export_hints(src_dir, dst_dir)
    # Benchmark ground truth nests per-camera image dirs under `groundtruth/`.
    elif ('depth_benchmark' in str(src_dir)):
        for src_path in sorted((src_dir / 'groundtruth').iterdir()):
            dst_path = ((dst_dir / 'groundtruth') / src_path.name)
            if ((not overwrite) and dst_path.is_dir()):
                print(f"  -> Skipping dir '{dst_path}'")
                continue
            export_images(src_path, dst_path)
    # Base dataset: loose files (e.g. timestamps) are copied; `data` dirs are
    # dispatched to an exporter based on the extension of their first file.
    else:
        for src_path in sorted(src_dir.iterdir()):
            dst_path = (dst_dir / src_path.name)
            if src_path.is_file():
                if (not dst_path.is_file()):
                    shutil.copy(src_path, dst_path)
            else:
                assert (src_path.stem == 'data')
                # Peek at the first entry to decide which exporter to use.
                file = next(src_path.iterdir(), None)
                if (file is None):
                    # Keep the directory structure even when there is nothing to export.
                    dst_path.mkdir(exist_ok=True, parents=True)
                    print(f"  -> Skipping empty dir '{dst_path}'")
                    continue
                ext = file.suffix
                if ((not overwrite) and dst_path.is_dir()):
                    print(f"  -> Skipping dir '{dst_path}'")
                    continue
                if (ext == '.png'):
                    export_images(src_path, dst_path)
                elif (ext == '.bin'):
                    export_velodyne(src_path, dst_path)
                elif (ext == '.txt'):
                    export_oxts(src_path, dst_path)
|
def export_calibration(src_seq: Path, dst_seq: Path, overwrite: bool=False) -> None:
    """Exports sequence calibration information as a LabelDatabase of arrays."""
    dst_dir = dst_seq / 'calibration'
    if dst_dir.is_dir() and not overwrite:
        print(f"  -> Skipping calib '{dst_dir}'")
        return
    print(f"  -> Processing calib '{dst_dir}'")

    cam2cam, imu2velo, velo2cam = kr.load_calib(src_seq.stem)
    groups = {'cam2cam': cam2cam, 'imu2velo': imu2velo, 'velo2cam': velo2cam}

    # Flatten the nested calib dicts into 'group/key' entries.
    flat = {}
    for name, group in groups.items():
        for key, val in group.items():
            flat[f'{name}/{key}'] = val
    write_label_database(flat, dst_dir)
|
def export_images(src_dir: Path, dst_dir: Path) -> None:
    """Export images as an ImageDatabase."""
    paths = sorted(src_dir.iterdir())
    write_image_database(dict((p.stem, p) for p in paths), dst_dir)
|
def export_oxts(src_dir: Path, dst_dir: Path) -> None:
    """Export OXTS dicts as a LabelDatabase."""
    records = {}
    for f in sorted(src_dir.iterdir()):
        records[f.stem] = kr.load_oxts(f)
    write_label_database(records, dst_dir)
|
def export_velodyne(src_dir: Path, dst_dir: Path) -> None:
    """Export Velodyne points as a LabelDatabase of arrays."""
    points = {}
    for f in sorted(src_dir.iterdir()):
        points[f.stem] = kr.load_velo(f)
    write_label_database(points, dst_dir)
|
def export_hints(src_dir: Path, dst_dir: Path) -> None:
    """Export depth hints as a LabelDatabase of arrays."""
    hints = {}
    for f in sorted(src_dir.iterdir()):
        hints[f.stem] = np.load(f)
    write_array_database(hints, dst_dir)
|
def process_dataset(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Process the entire MannequinChallenge dataset."""
    splits = dst_dir / 'splits'
    print(f"-> Copying splits directory '{splits}'...")
    shutil.copytree(src_dir / 'splits', splits, dirs_exist_ok=True)
    for m in ('train', 'val', 'test'):
        process_mode(src_dir / m, dst_dir / m, overwrite)
|
def process_mode(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Process a full MannequinChallenge mode, e.g. train or val."""
    # Load each sequence's calibration once and share it across all exporters.
    calibs = {}
    for d in tqdm(src_dir.iterdir()):
        calibs[d.stem] = mc.load_info(dst_dir.stem, d.stem)

    export_intrinsics(src_dir, dst_dir / 'intrinsics', calibs, overwrite)
    export_shapes(src_dir, dst_dir / 'shapes', calibs, overwrite)
    export_poses(src_dir, dst_dir / 'poses', calibs, overwrite)
    export_images(src_dir, dst_dir / 'images', overwrite)
|
def export_intrinsics(src_dir: Path, dst_dir: Path, calibs: dict[(str, dict)], overwrite: bool=False) -> None:
    """Create camera intrinsics LMDB.

    :param src_dir: (Path) Source mode directory (used only for logging).
    :param dst_dir: (Path) Destination directory for the intrinsics database.
    :param calibs: (dict) Per-sequence calibration, each mapping frame -> info dict with an 'K' entry.
    :param overwrite: (bool) If `True`, re-export even when `dst_dir` already exists.
    """
    if ((not overwrite) and dst_dir.is_dir()):
        print(f"-> Intrinsics already exist for dir '{src_dir.stem}'")
        return
    all_Ks = {}
    for (k, v) in tqdm(calibs.items()):
        # FIX: `np.stack` requires a sequence; passing a generator raises a
        # TypeError in modern NumPy, so materialize the list first.
        Ks = np.stack([vv['K'] for vv in v.values()])
        # All frames of a sequence should share intrinsics; keep the first and
        # warn if any frame disagrees.
        are_equal = (Ks[0] == Ks).all(axis=((- 2), (- 1)))
        if (not are_equal.all()):
            LOGGER.warning(f'Miss-matched Ks! {Ks[0]} {Ks[np.where((~ are_equal))]}')
        all_Ks[k] = Ks[0]
    print(f"-> Exporting intrinsics for dir '{src_dir.stem}'")
    # NOTE(review): this writes to `dst_dir / 'intrinsics'` even though callers
    # already pass `.../intrinsics` as `dst_dir` (doubly-nested path) — confirm intended.
    write_label_database(all_Ks, (dst_dir / 'intrinsics'))
|
def export_shapes(src_dir: Path, dst_dir: Path, calibs: dict[(str, dict)], overwrite: bool=False) -> None:
    """Create image shapes LMDB.

    :param src_dir: (Path) Source mode directory (used only for logging).
    :param dst_dir: (Path) Destination directory for the shapes database.
    :param calibs: (dict) Per-sequence calibration, each mapping frame -> info dict with a 'shape' entry.
    :param overwrite: (bool) If `True`, re-export even when `dst_dir` already exists.
    :raises ValueError: If frames within a sequence report different image shapes.
    """
    if ((not overwrite) and dst_dir.is_dir()):
        print(f"-> Shapes already exist for dir '{src_dir.stem}'")
        return
    all_shapes = {}
    for (k, v) in tqdm(calibs.items()):
        # FIX: `np.stack` requires a sequence; passing a generator raises a
        # TypeError in modern NumPy, so materialize the list first.
        shapes = np.stack([vv['shape'] for vv in v.values()])
        if (not (shapes[0] == shapes).all()):
            raise ValueError(f'Miss-matched shapes!')
        all_shapes[k] = shapes[0]
    print(f"-> Exporting shapes for dir '{src_dir.stem}'")
    # NOTE(review): writes to `dst_dir / 'shapes'` although callers already pass
    # `.../shapes` as `dst_dir` (doubly-nested path) — confirm intended.
    write_label_database(all_shapes, (dst_dir / 'shapes'))
|
def export_poses(src_dir: Path, dst_dir: Path, calibs: dict[(str, dict)], overwrite: bool=False) -> None:
    """Create camera poses LMDB."""
    if dst_dir.is_dir() and not overwrite:
        print(f"-> Poses already exist for dir '{src_dir.stem}'")
        return
    print(f'-> Exporting poses for dir {src_dir.stem}')

    # Flatten to 'sequence/frame' keys mapping to the 4x4 pose 'T'.
    all_poses = {}
    for seq, frames in tqdm(calibs.items()):
        for frame, info in frames.items():
            all_poses[f'{seq}/{frame}'] = info['T']
    write_label_database(all_poses, (dst_dir / 'poses'))
|
def export_images(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Create images LMDB."""
    if dst_dir.is_dir() and not overwrite:
        print(f"-> Images already exist for dir '{src_dir.stem}'")
        return
    print(f"-> Exporting images for dir '{src_dir.stem}'")

    # Collect every sequence's jpg frames under 'sequence/frame' keys.
    files = {}
    for d in tqdm(io.get_dirs(src_dir)):
        for p in io.get_files(d, key=(lambda f: (f.suffix == '.jpg'))):
            files[f'{d.stem}/{p.stem}'] = p
    write_image_database(files, dst_dir)
|
def process_dataset(overwrite=False):
    """Convert the full SlowTV dataset (splits, intrinsics, frames) into LMDBs."""
    src, dst = PATHS['slow_tv'], PATHS['slow_tv_lmdb']
    print(f"-> Copying splits directory '{(dst / 'splits')}'...")
    shutil.copytree(src / 'splits', dst / 'splits', dirs_exist_ok=True)
    export_intrinsics(dst, overwrite)

    # Export each video sequence in parallel.
    jobs = []
    for seq in stv.get_seqs():
        jobs.append((src / seq, dst, overwrite))
    with Pool() as p:
        list(p.starmap(export_seq, tqdm(jobs)))
|
def export_seq(path: Path, save_root: Path, overwrite: bool=False) -> None:
    """Convert SlowTV video into an LMDB."""
    seq = path.stem
    out_dir = save_root / seq
    if out_dir.is_dir() and not overwrite:
        print(f'-> Skipping directory "{out_dir}"...')
        return
    print(f'-> Export LMDB for dir "{seq}"')
    frames = io.get_files(path, key=(lambda f: (f.suffix == '.png')))
    write_image_database({f.stem: f for f in frames}, out_dir)
|
def export_intrinsics(save_root: Path, overwrite: bool=False) -> None:
    """Export SlowTV intrinsics as an LMDB."""
    out_dir = save_root / 'calibs'
    if out_dir.is_dir() and not overwrite:
        print(f'-> Skipping LMDB calibrations...')
        return
    print(f'-> Exporting intrinsics "{out_dir}"...')

    calibs = {}
    for seq in stv.get_seqs():
        calibs[seq] = stv.load_intrinsics(seq)
    write_label_database(calibs, out_dir)
|
def read_array(path):
    """Read a COLMAP dense depth/normal map from its binary on-disk format.

    The file begins with a text header of the form ``width&height&channels&``
    followed immediately by the raw float32 payload in column-major order.

    :param path: (str|Path) File to read.
    :return: (np.ndarray) (h, w) or (h, w, c) float32 array (singleton dims squeezed).
    """
    with open(path, 'rb') as fid:
        # Parse the '&'-delimited header to recover the array dimensions.
        (width, height, channels) = np.genfromtxt(fid, delimiter='&', max_rows=1, usecols=(0, 1, 2), dtype=int)
        # genfromtxt may consume past the header, so rewind and re-scan
        # byte-by-byte to position the stream just after the third '&',
        # i.e. at the start of the binary payload.
        fid.seek(0)
        num_delimiter = 0
        byte = fid.read(1)
        while True:
            if (byte == b'&'):
                num_delimiter += 1
                if (num_delimiter >= 3):
                    break
            byte = fid.read(1)
        # Remaining bytes are the flat float32 payload.
        array = np.fromfile(fid, np.float32)
    # Data is stored column-major (w, h, c); transpose back to row-major (h, w, c).
    array = array.reshape((width, height, channels), order='F')
    return np.transpose(array, (1, 0, 2)).squeeze().astype(np.float32)
|
def export_split(split, src, dst, overwrite=False):
    """Run the per-sequence COLMAP export for every sequence in a split."""
    print(f'-> Exporting "{split}" split...')
    out_root = dst / split
    io.mkdirs(out_root)

    seqs = io.get_dirs(src / split)
    # One (src, dst, overwrite) job per sequence, consumed by `export_seq`.
    jobs = [(s, out_root / s.stem, overwrite) for s in seqs]
    with Pool(8) as p:
        for _ in tqdm(p.imap_unordered(export_seq, jobs), total=len(seqs)):
            pass
    return {}
|
def export_seq(args):
    """Run the full COLMAP pipeline (SfM + dense MVS) for a single sequence.

    :param args: (tuple) (src: Path, dst: Path, overwrite: bool). Packed into a
        single tuple so the function can be mapped over a `Pool`.

    Failures are logged but not raised, so a single bad sequence does not kill
    the whole pool.
    """
    import traceback
    try:
        (src, dst, overwrite) = args
        # The `depths` directory doubles as the completion marker for a sequence.
        depth_dir = (dst / 'depths')
        if ((not overwrite) and depth_dir.is_dir()):
            print(f'-> Skipping "{src.parent.stem}" sequence "{src.stem}"...')
            return
        print(f'-> Exporting "{src.parent.stem}" sequence "{src.stem}"...')
        shutil.rmtree(dst, ignore_errors=True)
        io.mkdirs(dst)
        db_path = (dst / 'database.db')
        img_dir = (dst / 'images')
        sparse_dir = (dst / 'sparse')
        refined_dir = (dst / 'refined')
        dense_dir = (dst / 'dense')
        io.mkdirs(img_dir, sparse_dir, refined_dir, dense_dir)
        # FIX: side-effect list comprehension replaced with a plain loop.
        for f in io.get_files(src, key=(lambda f: (f.suffix == '.jpg'))):
            shutil.copy(f, img_dir)
        subprocess.call(['colmap', 'feature_extractor', '--ImageReader.single_camera', '1', '--ImageReader.default_focal_length_factor', '0.85', '--SiftExtraction.peak_threshold', '0.02', '--database_path', db_path, '--image_path', img_dir])
        subprocess.call(['colmap', 'exhaustive_matcher', '--SiftMatching.max_error', '3', '--SiftMatching.min_inlier_ratio', '0.3', '--SiftMatching.min_num_inliers', '30', '--SiftMatching.guided_matching', '1', '--database_path', db_path])
        subprocess.call(['colmap', 'mapper', '--Mapper.tri_merge_max_reproj_error', '3', '--Mapper.ignore_watermarks', '1', '--Mapper.filter_max_reproj_error', '2', '--database_path', db_path, '--image_path', img_dir, '--output_path', sparse_dir])
        subprocess.call(['colmap', 'bundle_adjuster', '--input_path', (sparse_dir / '0'), '--output_path', refined_dir])
        subprocess.call(['colmap', 'image_undistorter', '--input_path', refined_dir, '--image_path', img_dir, '--output_path', dense_dir, '--output_type', 'COLMAP', '--max_image_size', '1600'])
        subprocess.call(['colmap', 'patch_match_stereo', '--PatchMatchStereo.window_radius', '5', '--PatchMatchStereo.num_samples', '15', '--PatchMatchStereo.geom_consistency_regularizer', '1', '--PatchMatchStereo.geom_consistency_max_cost', '1.5', '--PatchMatchStereo.filter_min_ncc', '0.2', '--PatchMatchStereo.filter_min_num_consistent', '3', '--PatchMatchStereo.geom_consistency', 'true', '--workspace_path', dense_dir, '--workspace_format', 'COLMAP'])
        # Convert the geometric depth maps to npy files next to the source images.
        files = io.get_files(((dense_dir / 'stereo') / 'depth_maps'), key=(lambda f: ('geometric' in str(f))))
        for f in files:
            np.save((src / f"{f.name.split('.')[0]}.npy"), read_array(f))
        io.mkdirs(depth_dir)
    except Exception:
        # FIX: the bare `except: pass` silently swallowed every error (including
        # KeyboardInterrupt). Keep the best-effort behaviour, but log the failure.
        traceback.print_exc()
|
def main(root):
    """Export COLMAP reconstructions for the requested splits, reporting failures."""
    dst = root / 'colmap'
    io.mkdirs(dst)
    fails = {}
    for split in tqdm(['test']):
        fails[split] = export_split(split, root, dst, overwrite=False)
    print(fails)
|
def main(src, dst):
    """Reorganize the Kitti depth benchmark into the Kitti Raw sequence layout."""
    TARGET_DIR = 'depth_benchmark'
    K_DEPTH, K_RAW = src, dst
    print(f'-> Exporting Kitti Benchmark from "{K_DEPTH}" to "{K_RAW}"...')

    ROOT = K_RAW / TARGET_DIR
    ROOT.mkdir(exist_ok=True)
    for seq in kr.SEQS:
        (ROOT / seq).mkdir(exist_ok=True)

    for mode in ('train', 'val'):
        for path in tqdm(sorted((K_DEPTH / mode).iterdir())):
            # Each benchmark drive starts with its parent sequence name.
            seq = next(s for s in kr.SEQS if path.stem.startswith(s))
            shutil.copytree(path, ROOT / seq / path.stem, dirs_exist_ok=True)
|
def loadmat(file):
    """Load a MATLAB v7.3 (HDF5) mat-file into a dict of numpy arrays.

    Used because `scipy.io.loadmat` conflicts with specific matfile versions
    (it cannot read v7.3 files).

    :param file: (str|Path) Mat-file to load.
    :return: (dict[str, np.ndarray]) Top-level variables, fully materialized.
    """
    # FIX: close the HDF5 handle once the arrays are materialized, instead of
    # leaking it for the lifetime of the process.
    with h5py.File(file) as f:
        return {k: np.array(v) for (k, v) in f.items()}
|
def export_split(mode, idxs, data, dst):
    """Export one NYUDv2 split (rgb images, depth maps and the split file).

    :param mode: (str) Split name, e.g. 'train' or 'test'.
    :param idxs: (Iterable[int]) 1-based MATLAB indices into the label arrays.
    :param data: (dict) Arrays loaded from 'nyu_depth_v2_labeled.mat'.
    :param dst: (Path) Dataset root to export into.
    """
    img_dir = ((dst / mode) / 'rgb')
    depth_dir = ((dst / mode) / 'depth')
    split_file = ((dst / 'splits') / f'{mode}_files.txt')
    io.mkdirs(img_dir, depth_dir, split_file.parent)
    with open(split_file, 'w') as f:
        for i in tqdm(idxs):
            i -= 1  # MATLAB indices are 1-based; convert once.
            stem = f'{i:05}'
            # FIX: indices were previously decremented twice (`i -= 1` plus
            # `[i - 1]`), exporting an off-by-one image/depth pair for every sample.
            img = (data['images'][i].transpose((2, 1, 0)).astype(np.float32) / 255.0)
            depth = data['depths'][i].T[(..., None)]
            io.np2pil(img).save((img_dir / f'{stem}.png'))
            np.save((depth_dir / f'{stem}.npy'), depth)
            f.write((stem + '\n'))
|
def main(dst):
    """Export both NYUDv2 splits and delete the bulky source mat-files."""
    data_file = dst / 'nyu_depth_v2_labeled.mat'
    split_file = dst / 'splits.mat'

    data = loadmat(data_file)
    splits = sio.loadmat(split_file)
    for mode, key in (('train', 'trainNdxs'), ('test', 'testNdxs')):
        export_split(mode, splits[key].squeeze(), data, dst)

    # Clean up the original downloads once everything is exported.
    data_file.unlink()
    split_file.unlink()
|
def save_settings(**kwargs):
    """Persist the export settings alongside the generated splits."""
    cfg_file = (PATHS['slow_tv'] / 'splits') / 'config.yaml'
    io.write_yaml(cfg_file, kwargs)
|
def export_scene(args):
    """Extract frames for one SlowTV video, estimate intrinsics and register the split.

    :param args: (tuple) (vid_file: Path, cat: str) Video file and its scene category.

    NOTE(review): relies on module-level settings (`fps`, `trim`, `n_keep`,
    `per_interval`, `overwrite`, `colmap_dir`, `n_colmap_imgs`,
    `colmap_interval`, `p_train`, `val_skip`) defined elsewhere in the script —
    confirm against the caller.
    """
    (vid_file, cat) = args
    seq = vid_file.stem
    seq_dir = (PATHS['slow_tv'] / seq)
    stv.extract_frames(vid_file, save_dir=seq_dir, fps=fps, trim_start=trim, n_keep=n_keep, per_interval=per_interval, overwrite=overwrite)
    # COLMAP intrinsics estimation is brittle; retry with several fixed seeds.
    seeds = [42, 195, 335, 558, 724]
    for seed in seeds:
        try:
            stv.estimate_intrinsics(seq_dir, save_root=colmap_dir, n_imgs=n_colmap_imgs, interval=colmap_interval, seed=seed, overwrite=overwrite)
            break
        except RuntimeError:
            print(f'-> Failed COLMAP intrinsics with seed "{seed}"...')
    else:
        # `for/else`: only reached when no seed succeeded (loop never hit `break`).
        raise RuntimeError(f'-> Tried {seeds} and they all failed!!')
    stv.add_frames_to_split(seq_dir, cat, seq, 'all', p_train=p_train, skip=val_skip)
|
def main(args):
    """Extract frames + intrinsics for every SlowTV video, optionally in parallel."""
    if write_settings:
        save_settings(fps=fps, trim=trim, data_scale=data_scale, n_keep=n_keep, per_interval=per_interval, p_train=p_train, val_skip=val_skip, n_colmap_imgs=n_colmap_imgs, colmap_interval=colmap_interval)

    cats = stv.load_categories(subcats=False)
    video_files = io.get_files(vid_dir)
    assert (len(cats) == len(video_files)), 'Non-matching SlowTV videos and labelled categories.'

    # Debug path: export a single indexed scene and stop.
    if args.idx is not None:
        export_scene((video_files[args.idx], cats[args.idx]))
        return

    jobs = list(zip(video_files, cats))
    if args.n_proc == 0:
        for job in jobs:  # renamed: previously shadowed `args`
            export_scene(job)
    else:
        with Pool(args.n_proc) as p:
            list(tqdm(p.imap_unordered(export_scene, jobs), total=len(cats)))
|
def main(dst):
    """Copy the repository splits into `dst`, then drop this script's own file."""
    print(f'-> Copying splits to "{dst}"...')
    src = REPO_ROOT / 'api/data/splits'
    shutil.copytree(src, dst, dirs_exist_ok=True)
    (dst / FILE.name).unlink()
|
def save_metrics(file: Path, metrics: ty.U[(Metrics, ty.S[Metrics])]):
    """Helper to save metrics to a YAML file, creating parent dirs as needed."""
    LOGGER.info(f'Saving results to "{file}"...')
    out_dir = file.parent
    out_dir.mkdir(exist_ok=True, parents=True)
    write_yaml(file, metrics, mkdir=True)
|
def compute_eval_metrics(preds: ty.A, cfg_file: Path, align_mode: ty.U[(str, float)], nproc: ty.N[int]=None, max_items: ty.N[int]=None) -> tuple[(Metrics, ty.S[Metrics])]:
    """Compute evaluation metrics from scaleless network disparities (see `compute_eval_preds`).

    :param preds: (NDArray) (b, h, w) Precomputed unscaled network predictions.
    :param cfg_file: (Path) Path to YAML config file.
    :param align_mode: (str|float) Strategy used to align the predictions to the ground-truth. {median, lsqr, 1, 5.4...}
    :param nproc: (None|int) Number of processes to use. `None` to let OS determine it.
    :param max_items: (None|int) Maximum number of items to process. Used for testing/debugging a subset.
    :return: (
        mean_metrics: (Metrics) Average metrics across the whole dataset.
        metrics: (list[Metrics]) Metrics for each item in the dataset.
    )
    """
    cfg = load_yaml(cfg_file)
    cfg_ds, cfg_args = cfg['dataset'], cfg['args']

    # Numeric strings become fixed scale factors; anything else is a named strategy.
    try:
        cfg_args['align_mode'] = float(align_mode)
    except (ValueError, TypeError):
        cfg_args['align_mode'] = align_mode

    default_stem = f"targets_{cfg.get('mode', 'test')}"
    target_stem = cfg_ds.pop('target_stem', default_stem)

    ds = parsers.get_ds({cfg_ds.pop('type'): cfg_ds})
    ds = next(iter(ds.values()))

    target_file = ds.split_file.parent / f'{target_stem}.npz'
    LOGGER.info(f'Loading targets from "{target_file}"...')
    targets = np.load(target_file, allow_pickle=True)

    evaluator = MonoDepthEvaluator(**cfg_args)
    mean_metrics, metrics = evaluator.run(preds, targets, nproc=nproc, max_items=max_items)
    return (mean_metrics, metrics)
|
def save_preds(file: Path, preds: ty.A) -> None:
    """Helper to save network predictions to a NPZ file. Required for submitting to the challenge."""
    out_dir = file.parent
    io.mkdirs(out_dir)
    logging.info(f"Saving network predictions to '{file}'...")
    np.savez_compressed(file, pred=preds)
|
def compute_preds(cfg: dict, ckpt: str, cfg_model: ty.N[list[Path]], device: ty.N[str], overwrite: bool) -> ty.A:
    """Compute predictions for a given dataset and network cfg.

    `ckpt` can be provided as:
        - Path: Path to a pretrained checkpoint trained using the benchmark repository.
        - Name: Name indicating the external model type and variant to load, e.g. midas.MiDaS, newcrfs.indoor.

    Currently supported external models are: {
        midas.{MiDaS, DPT_Large, DPT_BEiT_L_512},
        newcrfs.{indoor,outdoor},
    }

    :param cfg: (dict) Dataset cfg, following `MonoDepthModule` conventions.
    :param ckpt: (str) Model checkpoint to load. Either our checkpoint file or external model name. See docs.
    :param cfg_model: (None|list[Path]) Optional model cfgs when loading our legacy models.
    :param device: (str) Device on which to compute predictions.
    :param overwrite: (bool) If `True`, compute predictions even if model has not finished training.
    :return: (NDArray) Network predictions for the whole dataset.
    """
    trigger_preds()
    # FIX: `ckpt.split('.', maxsplit=2)` raised a ValueError whenever the checkpoint
    # string contained two or more dots (e.g. a path like `exp.v2/last.ckpt`) or none.
    # `partition` always yields exactly (head, sep, tail).
    (model_type, _, name) = ckpt.partition('.')
    model_type = (model_type if (model_type in PRED_REG) else 'ours')
    predictor = PRED_REG[model_type]()
    if (model_type == 'ours'):
        ckpt = find_model_file(ckpt)
        # Refuse to evaluate unfinished training runs unless explicitly overridden.
        if ((not (ckpt.parent / 'finished').is_file()) and (not overwrite)):
            logging.error(f"Training for '{ckpt}' has not finished...")
            logging.error('Set `--overwrite 1` to run this evaluation anyway...')
            exit()
        logging.info(f"Loading pretrained model from '{ckpt}'")
        net = predictor.load_model(ckpt, cfg_model)
    else:
        net = predictor.load_model(name)
    # Force deterministic evaluation-time dataset settings.
    cfg.update({'shape': predictor.get_img_shape(cfg['type']), 'as_torch': True, 'use_aug': False, 'log_time': False})
    ds = parsers.get_ds({cfg.pop('type'): cfg})
    ds = next(iter(ds.values()))  # Single-dataset cfg: grab the only entry.
    dl = DataLoader(ds, batch_size=12, num_workers=8, collate_fn=ds.collate_fn, pin_memory=True)
    logging.info('Computing predictions...')
    preds = predictor(net, dl, use_stereo_blend=False, device=device)
    return preds
|
def get_models(root: Path, exp: str, dataset: str, ckpt: str='last', mode: str='*', res: str='results', models: ty.N[list[str]]=None, tag: str='') -> tuple[(dict[(str, list[Path])], list[str])]:
    """Find all models and files associated with a particular experiment.
    NOTE: Parameters can use regex expressions, but overlapping names will be combined together. Use at your own risk.

    Found model names can be adjusted using the MODEL_TAGS dictionary.

    :param root: (Path) Root directory to search for models.
    :param exp: (str) Experiment name.
    :param dataset: (str) Evaluation dataset name.
    :param ckpt: (str) Checkpoint mode to retrieve. {last, best, *}
    :param mode: (str) Depth alignment mode to retrieve. {metric, median, lsqr, *}
    :param res: (str) Results directory name.
    :param models: (None|list[str]) List of models to retrieve. (Default: All models will be retrieved)
    :param tag: (str) Tag to append to model names. Include '_' to make more legible.
    :return: (
        eval_files: (dict[str, list[Path]]) Mapping from model names to all found files.
        models: (list[str]) List of model names found.
    )
    """
    if isinstance(models, str):
        models = models.split()

    fname = f'{dataset}_{ckpt}_{mode}.yaml'
    if not models:
        # No explicit models: discover them from any matching results files.
        found = sorted(root.glob(f'{exp}/**/{res}/{fname}'))
        models = sorted({f.parents[2].stem for f in found})
        print('Evaluation Models:', models)

    # Collect results per model, dropping models with no files.
    eval_files = {}
    for m in models:
        fs = sorted(root.glob(f'{exp}/{m}/**/{res}/{fname}'))
        if fs:
            eval_files[m + tag] = fs
    models = list(eval_files)
    return (eval_files, models)
|
def load_dfs(files: dict[(str, list[Path])]) -> pd.DataFrame:
    """Load dict of YAML files into a single dataframe.

    :param files: (dict[str, list[Path]]) List of files for each model.
    :return: (DataFrame) Loaded dataframe, index based on the model key and a potential item number.
    """
    frames, index_models = [], []
    for model, fs in files.items():
        for f in fs:
            frames.append(pd.json_normalize(load_yaml(f)))
            index_models.append(model)
    df = pd.concat(frames)
    # (Model, Item) MultiIndex: one model entry per loaded file.
    df.index = pd.MultiIndex.from_product([index_models, frames[0].index], names=['Model', 'Item'])
    return df
|
def filter_df(df: pd.DataFrame) -> tuple[(pd.DataFrame, ty.S[int])]:
    """Preprocess dataframe to include only AbsRel and (F-Score or delta) metrics.

    :param df: (DataFrame) Raw metrics dataframe (possibly mixing legacy/current column names).
    :return: (
        df: (DataFrame) Filtered dataframe with columns renamed to {'Rel', 'F'}.
        metric_type: (list[int]) Per-column direction: -1 lower-is-better, +1 higher-is-better.
    )
    """
    # `metric_type` is kept parallel to `metrics` (AbsRel is lower-is-better).
    (metrics, metric_type) = (['AbsRel'], [(- 1)])
    (delta, delta_legacy) = ('$\\delta_{.25}$', '$\\delta < 1.25$')
    (f, f_legacy) = ('F-Score (10)', 'F-Score')
    # Migrate legacy column names/units to the current conventions.
    if ((f_legacy in df) and (f not in df)):
        df = df.rename(columns={f_legacy: f})
    if ((delta_legacy in df) and (delta not in df)):
        # Legacy delta was stored as a ratio; convert to percent.
        df[delta] = (100 * df[delta_legacy])
        df = df.drop(columns=[delta_legacy])
    # Prefer F-Score when available; otherwise fall back to the delta accuracy.
    if (f in df):
        metrics.append(f)
        metric_type.append((+ 1))
        if (f_legacy in df):
            # Merge legacy/current columns: each row holds a value in only one of them.
            df[f] = (df[f].fillna(0) + df[f_legacy].fillna(0))
    elif (delta in df):
        metrics.append(delta)
        metric_type.append((+ 1))
        if (delta_legacy in df):
            df[delta] = (df[delta].fillna(0) + (100 * df[delta_legacy].fillna(0)))
    df = df[metrics]
    df = df.rename(columns={'AbsRel': 'Rel', f: 'F'})
    return (df, metric_type)
|
def get_df_mean(df: pd.DataFrame, models: ty.S[str], name: str='Mean') -> tuple[(pd.DataFrame, pd.DataFrame)]:
    """Compute the average metrics and stddev across all model seeds."""
    # Group by the first index level ('Model') and aggregate across seeds.
    grouped = df.groupby(level=0)
    mean = grouped.agg('mean').reindex(models)
    std = grouped.agg('std').reindex(models)
    mean.columns.name = name
    std.columns.name = 'StdDev'
    return (mean, std)
|
def add_multitask_metrics(df: pd.DataFrame, metric_types: ty.S[int], ref_idx: int=None) -> tuple[(pd.DataFrame, ty.S[int])]:
    """Prepend multi-task metrics (relative improvement & mean rank) computed across all metrics."""
    # Relative improvement w.r.t. the reference row; higher is better (+1).
    improvement = compute_rel_improvement(df, metric_types, ref=ref_idx)
    df.insert(0, ('MT', '\\%'), improvement)
    metric_types.insert(0, 1)
    # Mean rank across metrics (including the new column); lower is better (-1).
    avg_rank = compute_mean_rank(df, metric_types)
    df.insert(0, ('MT', 'Rank'), avg_rank)
    metric_types.insert(0, (- 1))
    return (df, metric_types)
|
def compute_rel_improvement(df: pd.DataFrame, metric_types: ty.S[int], ref: int=0) -> pd.Series:
    """Compute average relative improvement w.r.t. a reference row index.

    :param df: (DataFrame) Input dataframe.
    :param metric_types: (list[int]) Metric type for each metric. {+1: Higher is better, -1: Lower is better}
    :param ref: (int) Reference row index to compute relative improvement w.r.t. (Default: 0)
    :return: (Series) Computed relative improvement per row.
    """
    # Sign-normalize so that "bigger is better" holds for every column.
    signed = df * metric_types
    baseline = signed.iloc[ref]
    rel = (signed - baseline) / baseline
    # Undo the sign flip and express as a percentage, averaged over metrics.
    return (100 * rel * metric_types).mean(axis=1)
|
def compute_mean_rank(df: pd.DataFrame, metric_types: ty.S[int]) -> pd.Series:
    """Compute the average ranking position across all metrics for each model.

    :param df: (DataFrame) Input dataframe.
    :param metric_types: (list[int]) Metric type for each metric. {+1: Higher is better, -1: Lower is better}
    :return: (Series) Computed average ranking per row.
    """
    # Sign-normalize so rank 1 is always the best model for each metric.
    signed = df * metric_types
    return signed.rank(axis=0, ascending=False).mean(axis=1)
|
def main():
    """Collate per-dataset evaluation results for all benchmarked models into LaTeX tables."""
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    root = MODEL_ROOTS[(- 1)]
    # Evaluation datasets/splits to aggregate (one column group per split).
    splits = ['kitti_eigen_benchmark', 'mc', 'ddad', 'diode_outdoor', 'sintel', 'syns_test', 'diode_indoors', 'nyud', 'tum']
    # Row index used as the baseline for the relative-improvement metric.
    ref = 0
    (dfs, stds, metric_types) = ([], [], [])
    for split in splits:
        # Gather (files, models) for each experiment family; zip(*) separates
        # the per-family file dicts from the per-family model-name lists.
        (fs, ms) = list(zip(*[get_models(root, exp='kbr', dataset=split, mode='lsqr', ckpt='last', models='base fwd no_ar_aug no_learn_K no_rand_supp none'), get_models(root, exp='benchmark', dataset=split, res='results', mode='lsqr', ckpt='best', models='garg monodepth2_MS diffnet_MS hrdepth_MS'), get_models(root, exp='benchmark', dataset=split, res='results', mode='stereo', ckpt='best', models='garg monodepth2_MS diffnet_MS hrdepth_MS'), get_models(root, exp='midas', dataset=split, mode='lsqr', ckpt='best', models='MiDaS DPT_Large DPT_BEiT_L_512'), get_models(root, exp='newcrfs', dataset=split, mode='lsqr', ckpt='best')]))
        # Merge the per-family {model: files} dicts into one mapping.
        files = {}
        for f in fs:
            files |= f
        models = [i for m in ms for i in m]
        df = load_dfs(files)
        (df, metric_type) = filter_df(df)
        (df_mean, df_std) = get_df_mean(df, models, name=split)
        dfs.append(df_mean)
        stds.append(df_std)
        metric_types.extend(metric_type)
    # Build a (dataset, metric) column MultiIndex before concatenating splits side-by-side.
    for d in dfs:
        d.columns = pd.MultiIndex.from_product([[d.columns.name], d.columns], names=['dataset', 'metrics'])
    df = pd.concat(dfs, axis=1)
    (df, metric_types) = add_multitask_metrics(df, metric_types, ref_idx=ref)
    print(TableFormatter.from_df(df, metrics=metric_types).to_latex(precision=2))
    # Same table layout for the across-seed standard deviations.
    for d in stds:
        d.columns = pd.MultiIndex.from_product([[d.columns.name], d.columns], names=['dataset', 'metrics'])
    std = pd.concat(stds, axis=1)
    print(TableFormatter.from_df(std, metrics=(- 1)).to_latex(precision=2))
|
def compute_preds(name: str, cfg: dict, ckpt: str, cfg_model: ty.N[list[Path]], device: ty.N[str], overwrite: bool) -> None:
    """Compute predictions for a given dataset and network cfg.

    :param name: (str) Name used when saving predictions.
    :param cfg: (dict) Dataset cfg, following `MonoDepthModule` conventions.
    :param ckpt: (str) Model checkpoint to load. Either our checkpoint file or external model name. See docs.
    :param cfg_model: (None|list[Path]) Optional model cfgs when loading our legacy models.
    :param device: (str) Device on which to compute predictions.
    :param overwrite: (bool) If `True`, compute predictions even if model has not finished training.
    :return:
    """
    trigger_preds()
    # FIX: `ckpt.split('.', maxsplit=2)` raised a ValueError whenever the checkpoint
    # string contained two or more dots (e.g. a path like `exp.v2/last.ckpt`) or none.
    # `partition` always yields exactly (head, sep, tail).
    (model_type, _, model_name) = ckpt.partition('.')
    model_type = (model_type if (model_type in PRED_REG) else 'ours')
    predictor = PRED_REG[model_type]()
    if (model_type == 'ours'):
        ckpt = find_model_file(ckpt)
        # Refuse to evaluate unfinished training runs unless explicitly overridden.
        if ((not (ckpt.parent / 'finished').is_file()) and (not overwrite)):
            logging.error(f"Training for '{ckpt}' has not finished...")
            logging.error('Set `--overwrite 1` to run this evaluation anyway...')
            exit()
        logging.info(f"Loading pretrained model from '{ckpt}'")
        net = predictor.load_model(ckpt, cfg_model)
    else:
        net = predictor.load_model(model_name)
    # Force deterministic evaluation-time dataset settings.
    cfg.update({'shape': predictor.get_img_shape(cfg['type']), 'as_torch': True, 'use_aug': False, 'log_time': False})
    ds = parsers.get_ds({cfg.pop('type'): cfg})
    ds = next(iter(ds.values()))  # Single-dataset cfg: grab the only entry.
    dl = DataLoader(ds, batch_size=16, num_workers=8, collate_fn=ds.collate_fn, pin_memory=True)
    logging.info('Computing predictions...')
    # Predictions are aligned/saved asynchronously by the pool; close+join so
    # every pending task completes before returning.
    pool = Pool()
    predictor.apply(net, dl, func=process_batch_preds, use_stereo_blend=False, device=device, name=name, pool=pool)
    pool.close()
    pool.join()
|
def process_batch_preds(batch: ty.BatchData, preds: ty.A, name: str, pool: Pool) -> None:
    """Align depth predictions and save files.

    :param batch: (BatchData) (inputs, labels, meta) tuple; meta holds the path identifiers.
    :param preds: (NDArray) Network depth predictions for the batch.
    :param name: (str) Prediction name used to build output file paths.
    :param pool: (Pool) Worker pool used to process items asynchronously.
    """
    m = batch[2]
    # Build one output file per item from its (mode, scene, seq, stem) identifiers.
    files = [mfr.Item(*items).get_depth_file(name) for items in zip(m['mode'], m['scene'], m['seq'], m['stem'])]
    (targets, preds) = ops.to_np([batch[1]['depth'].squeeze(), preds.squeeze()], permute=False)
    args = zip(targets, preds, files)
    # Fire-and-forget: files are written by the workers; the caller is expected
    # to `close()`/`join()` the pool so pending tasks finish.
    pool.map_async(process_single_pred, args)
|
def process_single_pred(args):
    """Upsample, align and save a single prediction."""
    (target, pred, file) = args
    aligned = align(upsample(pred, target), target)
    save_depth_image(file, aligned)
|
def upsample(pred: ty.A, target: ty.A) -> ty.A:
    """Upsample predictions to match target shape (no-op if shapes already agree)."""
    if pred.shape != target.shape:
        h, w = target.shape
        pred = cv2.resize(pred, (w, h), interpolation=cv2.INTER_LINEAR)
    return pred
|
def align(pred: ty.A, target: ty.A) -> ty.A:
    """Align predictions to ground-truth depth using least-squares and convert into depths."""
    # Only align against valid ground-truth depths (0 < d < 100).
    valid = (target > 0) & (target < 100)
    (scale, shift) = MonoDepthEvaluator._align_lsqr(pred[valid], to_inv(target[valid]))
    # Alignment happens in disparity space; invert back to depth.
    return to_inv(scale * pred + shift)
|
def save_depth_image(path: str, depth: ty.A) -> None:
    """Save depth map in MapFreeReloc format (png with depth in mm)."""
    depth_mm = (depth * 1000).astype(np.uint16)  # metres -> millimetres
    cv2.imwrite(str(path), depth_mm)
|
def align_median(pred: np.ndarray, target: np.ndarray) -> float:
    """Return scale factor for median-depth alignment."""
    scale = np.median(target) / np.median(pred)
    return scale
|
def align_lsqr(pred: np.ndarray, target: np.ndarray) -> list[float]:
    """Return [scale, shift] for closed-form least-squares alignment of `pred` to `target`.

    Solves `argmin_{s, t} || s*pred + t - target ||^2` via the 2x2 normal equations.

    :param pred: (NDArray) (n,) Flattened predictions.
    :param target: (NDArray) (n,) Flattened targets.
    :return: (list[float]) [scale, shift], or [0, 0] when the system is singular.
    """
    n = pred.shape[0]
    s = pred.sum()
    A = np.array([[(pred ** 2).sum(), s], [s, n]])
    if np.linalg.det(A) <= 0:
        # FIX: return a list here too, for a consistent return type with the
        # solved branch (was a tuple).
        return [0.0, 0.0]
    b = np.array([(pred * target).sum(), target.sum()])
    x = np.linalg.inv(A) @ b
    return x.tolist()
|
def main():
    """Scratch script: align a KBR depth prediction to LiDAR.

    First applies a median-depth scale, then a least-squares (scale, shift) fit
    in disparity space.

    NOTE(review): the '.../kbr/file.npy' and '.../lidar/file.npy' paths are
    placeholders — fill in real paths before running.
    """
    def to_inv(depth: np.ndarray, eps: float=1e-05) -> np.ndarray:
        # Safe reciprocal: invalid (<= 0) depths map to ~0 instead of exploding.
        return ((depth > 0) / (depth + eps))
    depth = np.load('.../kbr/file.npy')
    lidar = np.load('.../lidar/file.npy')
    valid = ((lidar > 0) & (lidar < 100))
    (depth_mask, lidar_mask) = (depth[valid], lidar[valid])
    # FIX: `align_median` returns a single scale factor, not (scale, shift) —
    # unpacking its scalar result raised a TypeError.
    scale = align_median(depth_mask, lidar_mask)
    (depth, depth_mask) = ((depth * scale), (depth_mask * scale))
    # Refine with an affine fit in disparity space.
    (disp, disp_mask) = (to_inv(depth), to_inv(depth_mask))
    (scale, shift) = align_lsqr(disp_mask, to_inv(lidar_mask))
    (disp, disp_mask) = (((disp * scale) + shift), ((disp_mask * scale) + shift))
    (depth, depth_mask) = (to_inv(disp), to_inv(disp_mask))
|
def forward_beit(net, x):
    """Run a BEiT backbone through the shared adapted-unflatten forward path."""
    out = forward_adapted_unflatten(net, x, 'forward_features')
    return out
|
def make_beitl16_512(pretrained, use_readout='ignore', hooks=(5, 11, 17, 23)):
    'Build a DPT backbone from BEiT-Large/16 at 512px.'
    return _make_beit_backbone(
        timm.create_model('beit_large_patch16_512', pretrained=pretrained),
        features=[256, 512, 1024, 1024], size=[512, 512], hooks=hooks,
        vit_features=1024, use_readout=use_readout)
|
def make_beitl16_384(pretrained, use_readout='ignore', hooks=(5, 11, 17, 23)):
    'Build a DPT backbone from BEiT-Large/16 at 384px (default 384 grid).'
    return _make_beit_backbone(
        timm.create_model('beit_large_patch16_384', pretrained=pretrained),
        features=[256, 512, 1024, 1024], hooks=hooks,
        vit_features=1024, use_readout=use_readout)
|
def make_beitb16_384(pretrained, use_readout='ignore', hooks=(2, 5, 8, 11)):
    'Build a DPT backbone from BEiT-Base/16 at 384px.'
    return _make_beit_backbone(
        timm.create_model('beit_base_patch16_384', pretrained=pretrained),
        features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout)
|
def _make_beit_backbone(model, features=(96, 192, 384, 768), size=(384, 384), hooks=(0, 4, 8, 11), vit_features=768, use_readout='ignore', start_index=1, start_index_readout=1):
    'Wrap a timm BEiT model as a DPT backbone, monkey-patching it to accept arbitrary window sizes.'
    backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, start_index_readout)
    net = backbone.model
    # Replace forwards that bake in the pre-training resolution.
    net.patch_embed.forward = types.MethodType(_patch_embed_forward, net.patch_embed)
    net.forward_features = types.MethodType(_beit_forward_features, net)
    for blk in net.blocks:
        blk.attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, blk.attn)
        blk.attn.forward = types.MethodType(_attention_forward, blk.attn)
        blk.attn.relative_position_indices = {}  # per-window-size index cache
        blk.forward = types.MethodType(_block_forward, blk)
    return backbone
|
def _patch_embed_forward(self, x):
'Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes.'
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x
|
def _beit_forward_features(self, x):
'Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes.'
resolution = x.shape[2:]
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], (- 1), (- 1)), x), dim=1)
if (self.pos_embed is not None):
x = (x + self.pos_embed)
x = self.pos_drop(x)
rel_pos_bias = (self.rel_pos_bias() if (self.rel_pos_bias is not None) else None)
for blk in self.blocks:
if (self.grad_checkpointing and (not torch.jit.is_scripting())):
x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
else:
x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias)
x = self.norm(x)
return x
|
def _get_rel_pos_bias(self, window_size):
    'Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.'
    # Interpolate the pre-trained relative-position bias table from the
    # training window size (self.window_size) to the requested `window_size`.
    old_height = ((2 * self.window_size[0]) - 1)
    old_width = ((2 * self.window_size[1]) - 1)
    new_height = ((2 * window_size[0]) - 1)
    new_width = ((2 * window_size[1]) - 1)
    old_relative_position_bias_table = self.relative_position_bias_table
    old_num_relative_distance = self.num_relative_distance
    # +3 accounts for the special cls<->token / token<->cls / cls<->cls entries.
    new_num_relative_distance = ((new_height * new_width) + 3)
    # Strip the 3 special entries before spatial interpolation.
    old_sub_table = old_relative_position_bias_table[:(old_num_relative_distance - 3)]
    # NOTE(review): the table is reshaped as (width, height) but interpolated
    # to size=(height, width); this mirrors the upstream MiDaS/timm code —
    # confirm it is intentional for non-square windows.
    old_sub_table = old_sub_table.reshape(1, old_width, old_height, (- 1)).permute(0, 3, 1, 2)
    new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode='bilinear')
    new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape((new_num_relative_distance - 3), (- 1))
    # Re-append the untouched special entries.
    new_relative_position_bias_table = torch.cat([new_sub_table, old_relative_position_bias_table[(old_num_relative_distance - 3):]])
    key = f'{window_size[1]},{window_size[0]}'
    # Cache the relative-position index per window size (expensive to rebuild).
    if (key not in self.relative_position_indices.keys()):
        self.relative_position_indices[key] = gen_relative_position_index(window_size)
    relative_position_bias = new_relative_position_bias_table[self.relative_position_indices[key].view((- 1))].view(((window_size[0] * window_size[1]) + 1), ((window_size[0] * window_size[1]) + 1), (- 1))
    relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
    return relative_position_bias.unsqueeze(0)
|
def _attention_forward(self, x, resolution, shared_rel_pos_bias=None):
'Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes.'
(B, N, C) = x.shape
qkv_bias = (torch.cat((self.q_bias, self.k_bias, self.v_bias)) if (self.q_bias is not None) else None)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, (- 1)).permute(2, 0, 3, 1, 4)
(q, k, v) = qkv.unbind(0)
q = (q * self.scale)
attn = (q @ k.transpose((- 2), (- 1)))
if (self.relative_position_bias_table is not None):
window_size = tuple((np.array(resolution) // 16))
attn = (attn + self._get_rel_pos_bias(window_size))
if (shared_rel_pos_bias is not None):
attn = (attn + shared_rel_pos_bias)
attn = attn.softmax(dim=(- 1))
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, (- 1))
x = self.proj(x)
x = self.proj_drop(x)
return x
|
def _block_forward(self, x, resolution, shared_rel_pos_bias=None):
'Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes.'
if (self.gamma_1 is None):
x = (x + self.drop_path(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)))
x = (x + self.drop_path(self.mlp(self.norm2(x))))
else:
x = (x + self.drop_path((self.gamma_1 * self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias))))
x = (x + self.drop_path((self.gamma_2 * self.mlp(self.norm2(x)))))
return x
|
class FeatureInfo():
    'Encoder multi-scale feature information. Used for compatibility with `timm`.'
    def __init__(self, n_ch: ty.S[int]):
        self.n_ch = n_ch  # channels per encoder stage
        # Downsampling factor per stage: 32 at the deepest stage, halving
        # towards the shallowest (e.g. 4 stages -> [4, 8, 16, 32]).
        self.red = [32 >> i for i in reversed(range(len(self.n_ch)))]
    def channels(self) -> ty.S[int]:
        return self.n_ch
    def reduction(self) -> ty.S[int]:
        return self.red
|
class DptEncoder(nn.Module):
    'DPT encoder wrapping assorted `timm` BEiT/Swin/ViT backbones.'
    def __init__(self, enc_name: str, pretrained: bool=True, use_readout: str='project'):
        super().__init__()
        # Dispatch table: backbone name -> zero-arg factory. Swin variants do
        # not take a readout argument.
        builders = {
            'beitl16_512': lambda: make_beitl16_512(pretrained, hooks=[5, 11, 17, 23], use_readout=use_readout),
            'beitl16_384': lambda: make_beitl16_384(pretrained, hooks=[5, 11, 17, 23], use_readout=use_readout),
            'beitb16_384': lambda: make_beitb16_384(pretrained, hooks=[2, 5, 8, 11], use_readout=use_readout),
            'swin2l24_384': lambda: make_swin2l24_384(pretrained, hooks=[1, 1, 17, 1]),
            'swin2b24_384': lambda: make_swin2b24_384(pretrained, hooks=[1, 1, 17, 1]),
            'swin2t16_256': lambda: make_swin2t16_256(pretrained, hooks=[1, 1, 5, 1]),
            'swinl12_384': lambda: make_swinl12_384(pretrained, hooks=[1, 1, 17, 1]),
            'vitb_rn50_384': lambda: make_vitb_rn50_384(pretrained, hooks=[0, 1, 8, 11], use_readout=use_readout),
            'vitl16_384': lambda: make_vitl16_384(pretrained, hooks=[5, 11, 17, 23], use_readout=use_readout),
            'vitb16_384': lambda: make_vitb16_384(pretrained, hooks=[2, 5, 8, 11], use_readout=use_readout),
        }
        if enc_name not in builders:
            raise ValueError(f"Backbone '{enc_name}' not implemented")
        self.net = builders[enc_name]()
        self.feature_info = FeatureInfo(self.net.n_ch)
        # The forward helper depends on the backbone family.
        if 'beit' in enc_name:
            self.fwd = forward_beit
        elif 'swin' in enc_name:
            self.fwd = forward_swin
        elif 'vit' in enc_name:
            self.fwd = forward_vit
    def forward(self, x: ty.T) -> ty.S[ty.T]:
        # channels_last is generally faster for conv-heavy backbones on GPU.
        return self.fwd(self.net, x.to(memory_format=torch.channels_last))
|
def forward_swin(net, x):
    'Forward helper for Swin backbones (delegates to `forward_default`).'
    return forward_default(net, x)
|
def make_swinl12_384(pretrained, hooks=(1, 1, 17, 1)):
    'Build a DPT backbone from Swin-Large window-12 at 384px.'
    return _make_swin_backbone(
        timm.create_model('swin_large_patch4_window12_384', pretrained=pretrained),
        hooks=hooks)
|
def make_swin2l24_384(pretrained, hooks=(1, 1, 17, 1)):
    'Build a DPT backbone from SwinV2-Large window-24 at 384px.'
    return _make_swin_backbone(
        timm.create_model('swinv2_large_window12to24_192to384_22kft1k', pretrained=pretrained),
        hooks=hooks)
|
def make_swin2b24_384(pretrained, hooks=(1, 1, 17, 1)):
    'Build a DPT backbone from SwinV2-Base window-24 at 384px.'
    return _make_swin_backbone(
        timm.create_model('swinv2_base_window12to24_192to384_22kft1k', pretrained=pretrained),
        hooks=hooks)
|
def make_swin2t16_256(pretrained, hooks=(1, 1, 17, 1)):
    'Build a DPT backbone from SwinV2-Tiny window-16 at 256px.'
    return _make_swin_backbone(
        timm.create_model('swinv2_tiny_window16_256', pretrained=pretrained),
        hooks=hooks, patch_grid=[64, 64])
|
def _make_swin_backbone(model, hooks=(1, 1, 17, 1), patch_grid=(96, 96)):
    'Wrap a timm Swin model as a DPT backbone by hooking one block per stage.'
    net = nn.Module()
    net.model = model
    # Capture intermediate activations: stage i exposes block hooks[i].
    for stage, hook in enumerate(hooks):
        net.model.layers[stage].blocks[hook].register_forward_hook(get_activation(str(stage + 1)))
    net.activations = activations
    grid = np.array(getattr(model, 'patch_grid', patch_grid), dtype=int)
    # Each Swin stage halves the token grid; unflatten tokens back to 2D maps.
    for idx, div in enumerate((1, 2, 4, 8), start=1):
        post = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size(grid // div)))
        setattr(net, f'act_postprocess{idx}', post)
    return net
|
class ResidualBlock(nn.Module):
    'Residual convolution module: two (act -> conv [-> bn]) steps plus a skip add.'
    def __init__(self, ch: int, act: nn.Module, use_bn: bool=False):
        super().__init__()
        self.bn = use_bn
        self.conv1 = nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1, bias=True, groups=1)
        self.conv2 = nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1, bias=True, groups=1)
        if self.bn:
            self.bn1 = nn.BatchNorm2d(ch)
            self.bn2 = nn.BatchNorm2d(ch)
        self.act = act
        # Quantization-friendly residual addition.
        self.skip_add = nn.quantized.FloatFunctional()
    def forward(self, x: Tensor) -> Tensor:
        out = self.conv1(self.act(x))
        if self.bn:
            out = self.bn1(out)
        out = self.conv2(self.act(out))
        if self.bn:
            out = self.bn2(out)
        return self.skip_add.add(out, x)
|
class FeatureFusionBlock(nn.Module):
    'Feature fusion block: residual-refine, optionally merge a skip input, upsample and project.'
    def __init__(self, ch: int, act: nn.Module, deconv: bool=False, use_bn: bool=False, expand: bool=False, align_corners: bool=True, size: Optional[tuple[(int, int)]]=None):
        super().__init__()
        self.deconv = deconv
        self.align_corners = align_corners
        self.expand = expand
        self.size = size
        # Optionally halve the channel count on output.
        out_ch = (ch // 2) if expand else ch
        self.resConfUnit1 = ResidualBlock(ch, act, use_bn)
        self.resConfUnit2 = ResidualBlock(ch, act, use_bn)
        self.out_conv = nn.Conv2d(ch, out_ch, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
        self.skip_add = nn.quantized.FloatFunctional()
    def upsample(self, x: Tensor, size: Optional[tuple[(int, int)]]=None) -> Tensor:
        'Upsample `x`; priority: call-site size > configured size > 2x scale.'
        if size:
            kw = {'size': size}
        elif self.size:
            kw = {'size': self.size}
        else:
            kw = {'scale_factor': 2}
        return F.interpolate(x, **kw, mode='bilinear', align_corners=self.align_corners)
    def forward(self, *xs: Tensor, size: Optional[tuple[(int, int)]]=None) -> Tensor:
        out = xs[0]
        if len(xs) == 2:
            # Fuse the optional skip input through a residual unit.
            out = self.skip_add.add(out, self.resConfUnit1(xs[1]))
        out = self.resConfUnit2(out)
        return self.out_conv(self.upsample(out, size=size))
|
class DptDecoder(nn.Module):
    'DPT decoder: project multi-scale encoder features to a common width and fuse them top-down.'
    def __init__(self, num_ch_enc: list[int], enc_sc: list[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: list[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='relu'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        self.activation = ACT[self.out_act]
        self.num_ch_dec = 256  # shared width of all fusion stages
        # Per-stage projections into the decoder width.
        self.layers = nn.ModuleList(nn.Conv2d(ch, self.num_ch_dec, kernel_size=3, stride=1, padding=1, bias=False) for ch in self.num_ch_enc)
        self.refine = nn.ModuleList(self._make_fusion_block(self.num_ch_dec, use_bn=False) for _ in self.num_ch_enc)
        self.out_conv = nn.ModuleDict({str(i): self._make_head(self.num_ch_dec, self.out_ch, self.activation, hidden_ch=32) for i in self.out_sc})
    @staticmethod
    def _make_fusion_block(ch: int, use_bn: bool, size: Optional[tuple[(int, int)]]=None):
        'Build one feature-fusion stage.'
        return FeatureFusionBlock(ch, act=nn.ReLU(False), deconv=False, use_bn=use_bn, expand=False, align_corners=True, size=size)
    @staticmethod
    def _make_head(in_ch: int, out_ch: int, act: nn.Module, hidden_ch: Optional[int]=None) -> nn.Module:
        'Build an output head: conv -> 2x upsample -> conv -> ReLU -> 1x1 conv -> activation.'
        return nn.Sequential(
            nn.Conv2d(in_ch, in_ch // 2, kernel_size=3, stride=1, padding=1),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(in_ch // 2, hidden_ch, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(hidden_ch, out_ch, kernel_size=1, stride=1, padding=0),
            act)
    def forward(self, feat: list[Tensor]) -> dict[(int, Tensor)]:
        projected = [conv(f) for conv, f in zip(self.layers, feat)]
        out = {}
        top = len(projected) - 1
        x = projected[top]
        for i in range(top, -1, -1):
            # The deepest stage has no incoming skip feature.
            x = self.refine[i](x) if i == top else self.refine[i](x, projected[i])
            if i in self.out_sc:
                out[i] = self.out_conv[str(i)](x)
        return out
|
class MonodepthDecoder(nn.Module):
    """From Monodepth(2) (https://arxiv.org/abs/1806.01260)

    Generic convolutional decoder incorporating multi-scale predictions and skip connections.

    :param num_ch_enc: (list[int]) List of channels per encoder stage.
    :param enc_sc: (list[int]) List of downsampling factor per encoder stage.
    :param upsample_mode: (str) Torch upsampling mode. {'nearest', 'bilinear'...}
    :param use_skip: (bool) If `True`, add skip connections from corresponding encoder stage.
    :param out_sc: (list[int]) List of multi-scale output downsampling factor as 2**s.
    :param out_ch: (int) Number of output channels.
    :param out_act: (str) Activation to apply to each output stage.
    """
    def __init__(self, num_ch_enc: list[int], enc_sc: list[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: list[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='sigmoid'):
        super().__init__()
        self.num_ch_enc = num_ch_enc
        self.enc_sc = enc_sc
        self.upsample_mode = upsample_mode
        self.use_skip = use_skip
        self.out_sc = out_sc
        self.out_ch = out_ch
        self.out_act = out_act
        if self.out_act not in ACT:
            raise KeyError(f'Invalid activation key. ({self.out_act} vs. {tuple(ACT.keys())}')
        self.act = ACT[self.out_act]
        self.num_ch_dec = [16, 32, 64, 128, 256]
        self.convs = OrderedDict()
        for s in range(4, -1, -1):
            # First upconv: from the previous (deeper) decoder stage.
            ch_in = self.num_ch_enc[-1] if s == 4 else self.num_ch_dec[s + 1]
            self.convs[f'upconv_{s}_{0}'] = conv_block(ch_in, self.num_ch_dec[s])
            # Second upconv: after upsampling and (optionally) skip concat.
            ch_in = self.num_ch_dec[s]
            scale = 2 ** s
            if self.use_skip and scale in self.enc_sc:
                ch_in += self.num_ch_enc[self.enc_sc.index(scale)]
            self.convs[f'upconv_{s}_{1}'] = conv_block(ch_in, self.num_ch_dec[s])
        for s in self.out_sc:
            self.convs[f'outconv_{s}'] = conv3x3(self.num_ch_dec[s], self.out_ch)
        # Register every conv as a submodule so parameters are tracked.
        self.decoder = nn.ModuleList(list(self.convs.values()))
    def forward(self, feat: list[Tensor]) -> dict[(int, Tensor)]:
        out = {}
        x = feat[-1]
        for s in range(4, -1, -1):
            x = self.convs[f'upconv_{s}_{0}'](x)
            parts = [F.interpolate(x, scale_factor=2, mode=self.upsample_mode)]
            scale = 2 ** s
            if self.use_skip and scale in self.enc_sc:
                parts.append(feat[self.enc_sc.index(scale)])
            x = self.convs[f'upconv_{s}_{1}'](torch.cat(parts, 1))
            if s in self.out_sc:
                out[s] = self.act(self.convs[f'outconv_{s}'](x))
        return out
|
def main():
    'CLI entry point: configure and launch monocular-depth training with PyTorch Lightning.'
    # NOTE(review): this module defines `main` more than once; later
    # definitions shadow this one at import time — confirm which is intended.
    parser = ArgumentParser(description='Monocular depth trainer.')
    parser.add_argument('--cfg-files', '-c', type=Path, nargs='*', help='Path to YAML config files to load (default, override).')
    parser.add_argument('--ckpt-dir', '-o', default=MODEL_ROOTS[(- 1)], type=Path, help='Root path to store checkpoint in.')
    parser.add_argument('--name', '-n', required=True, type=str, help='Model name for use during saving.')
    parser.add_argument('--version', '-v', default=0, type=int, help='Model version number for use during saving.')
    parser.add_argument('--seed', '-s', default=42, type=int, help='Random generator seed.')
    parser.add_argument('--gpus', '-g', default=1, type=int, help='Number of training GPUs.')
    parser.add_argument('--log', '-l', default='info', help='Logging verbosity level.')
    args = parser.parse_args()
    set_logging_level(args.log)
    # NOTE(review): `--cfg-files` defaults to None when omitted, which makes
    # the comprehension below raise TypeError — consider `required=True`.
    LOGGER.info(f"Creating config from {[f'{f.parent.stem}/{f.name}' for f in args.cfg_files]}...")
    LOGGER.warning('Please ensure configs are in the correct order! (default, overwrite1, overwrite2...)')
    # Later config files override earlier ones.
    cfg = io.load_merge_yaml(*args.cfg_files)
    cfg['loader']['seed'] = args.seed
    cfg['loader']['use_ddp'] = (args.gpus > 1)
    version = f'{args.version:03}'
    save_dir = ((args.ckpt_dir / args.name) / version)
    io.mkdirs(save_dir)
    logger_type = cfg['trainer'].get('logger', 'wandb')
    if (logger_type == 'tensorboard'):
        logger = pll.TensorBoardLogger(save_dir=args.ckpt_dir, name=args.name, version=version, default_hp_metric=False)
    elif (logger_type == 'wandb'):
        logger = pll.WandbLogger(save_dir=save_dir, version=f'{args.name}_{version}', project=args.ckpt_dir.stem, log_model=False, resume=None)
    else:
        raise ValueError(f'Logger "{logger_type}" not supported. Please choose from "{{tensorboard, wandb}}"')
    # Checkpoint-selection metric: accuracies are maximized, everything else minimized.
    monitor = cfg['trainer'].get('monitor', 'AbsRel')
    monitor = (f'val_losses/{monitor}' if (monitor == 'loss') else f'val_metrics/{monitor}')
    mode = ('max' if ('Acc' in monitor) else 'min')
    cb_ckpt = plc.ModelCheckpoint(dirpath=(save_dir / 'models'), filename='best', auto_insert_metric_name=False, monitor=monitor, mode=mode, save_last=True, save_top_k=1, verbose=True)
    cbks = [cb_ckpt, plc.LearningRateMonitor(logging_interval='epoch'), plc.RichModelSummary(max_depth=2), cb.RichProgressBar(), cb.TrainingManager(Path(cb_ckpt.dirpath)), cb.DetectAnomaly(), HeavyLogger(n_imgs=6, n_cols=2)]
    if cfg['trainer'].get('swa'):
        cbks.append(plc.StochasticWeightAveraging(swa_epoch_start=0.75, annealing_epochs=5, swa_lrs=cfg['optimizer']['lr']))
    if cfg['trainer'].get('early_stopping'):
        cbks.append(plc.EarlyStopping(monitor=monitor, mode=mode, patience=5))
    # Seed before model construction so weight init is reproducible.
    pl.seed_everything(args.seed)
    if (path := cfg['trainer'].get('load_ckpt')):
        path = find_model_file(path)
        LOGGER.info(f'Loading model from checkpoint: {path}')
        model = MonoDepthModule.load_from_checkpoint(path, cfg=cfg, strict=True)
    else:
        model = MonoDepthModule(cfg)
    # Resume from the last checkpoint if requested and present.
    resume_path = None
    if cfg['trainer'].get('resume_training'):
        LOGGER.info('Resuming training...')
        if (path := Path(cb_ckpt.dirpath, 'last.ckpt')).is_file():
            resume_path = path
        else:
            LOGGER.warning(f'No previous checkpoint found in "{path.parent}". Beginning training from scratch...')
    trainer = pl.Trainer(devices=args.gpus, accelerator='gpu', strategy='auto', max_epochs=cfg['trainer']['max_epochs'], limit_train_batches=1.0, limit_val_batches=200, accumulate_grad_batches=cfg['trainer'].get('accumulate_grad_batches', 1), log_every_n_steps=cfg['trainer'].get('log_every_n_steps', 100), use_distributed_sampler=False, benchmark=cfg['trainer'].get('benchmark', False), precision=cfg['trainer'].get('precision', 32), gradient_clip_val=cfg['trainer'].get('gradient_clip_val', None), logger=logger, callbacks=cbks, enable_model_summary=False)
    # NOTE(review): `num_training_batches` may not be populated before `fit` — confirm this logs a meaningful value.
    LOGGER.info(f'-> Number of training batches: {trainer.num_training_batches}...')
    if model.auto_scale_lr:
        # Linear LR scaling with the effective batch size (devices * grad accumulation).
        LOGGER.info(f'Scaling LR "{model.lr}" using "{trainer.num_devices}" devices and accumulate "{trainer.accumulate_grad_batches}" batches...')
        model.lr *= (trainer.num_devices * trainer.accumulate_grad_batches)
    trainer.fit(model, ckpt_path=resume_path)
|
def main():
    'CLI entry point: debug variant of the trainer (few batches, anomaly detection, simple profiler).'
    # NOTE(review): near-duplicate of the other `main` in this file; this one
    # caps training at 10 batches/epochs, enables `detect_anomaly` and the
    # 'simple' profiler, and uses a TQDM progress bar — confirm which `main`
    # should win, as later definitions shadow earlier ones.
    parser = ArgumentParser(description='Monocular depth trainer.')
    parser.add_argument('--cfg-files', '-c', type=Path, nargs='*', help='Path to YAML config files to load (default, override).')
    parser.add_argument('--ckpt-dir', '-o', default=Path('/tmp'), type=Path, help='Root path to store checkpoint in.')
    parser.add_argument('--name', '-n', required=True, type=str, help='Model name for use during saving.')
    parser.add_argument('--version', '-v', default=0, type=int, help='Model version number for use during saving.')
    parser.add_argument('--seed', '-s', default=42, type=int, help='Random generator seed.')
    parser.add_argument('--gpus', '-g', default=1, type=int, help='Number of training GPUs.')
    parser.add_argument('--log', '-l', default='info', help='Logging verbosity level.')
    args = parser.parse_args()
    set_logging_level(args.log)
    # NOTE(review): `--cfg-files` defaults to None when omitted, which makes
    # the comprehension below raise TypeError — consider `required=True`.
    LOGGER.info(f"Creating config from {[f'{f.parent.stem}/{f.name}' for f in args.cfg_files]}...")
    LOGGER.warning('Please ensure configs are in the correct order! (default, overwrite1, overwrite2...)')
    cfg = io.load_merge_yaml(*args.cfg_files)
    cfg['loader']['seed'] = args.seed
    cfg['loader']['use_ddp'] = (args.gpus > 1)
    version = f'{args.version:03}'
    save_dir = ((args.ckpt_dir / args.name) / version)
    io.mkdirs(save_dir)
    logger_type = cfg['trainer'].get('logger', 'wandb')
    if (logger_type == 'tensorboard'):
        logger = pll.TensorBoardLogger(save_dir=args.ckpt_dir, name=args.name, version=version, default_hp_metric=False)
    elif (logger_type == 'wandb'):
        logger = pll.WandbLogger(save_dir=save_dir, version=f'{args.name}_{version}', project=args.ckpt_dir.stem, log_model=False, resume=None)
    else:
        raise ValueError(f'Logger "{logger_type}" not supported. Please choose from "{{tensorboard, wandb}}"')
    # Checkpoint-selection metric: accuracies are maximized, everything else minimized.
    monitor = cfg['trainer'].get('monitor', 'AbsRel')
    monitor = (f'val_losses/{monitor}' if (monitor == 'loss') else f'val_metrics/{monitor}')
    mode = ('max' if ('Acc' in monitor) else 'min')
    cb_ckpt = plc.ModelCheckpoint(dirpath=(save_dir / 'models'), filename='best', auto_insert_metric_name=False, monitor=monitor, mode=mode, save_last=True, save_top_k=1, verbose=True)
    cbks = [cb_ckpt, plc.LearningRateMonitor(logging_interval='epoch'), plc.RichModelSummary(max_depth=2), cb.TQDMProgressBar(), cb.TrainingManager(Path(cb_ckpt.dirpath)), cb.DetectAnomaly(), HeavyLogger(n_imgs=6, n_cols=2)]
    if cfg['trainer'].get('swa'):
        cbks.append(plc.StochasticWeightAveraging(swa_epoch_start=0.75, annealing_epochs=5, swa_lrs=cfg['optimizer']['lr']))
    if cfg['trainer'].get('early_stopping'):
        cbks.append(plc.EarlyStopping(monitor=monitor, mode=mode, patience=5))
    # Seed before model construction so weight init is reproducible.
    pl.seed_everything(args.seed)
    if (path := cfg['trainer'].get('load_ckpt')):
        path = find_model_file(path)
        LOGGER.info(f'Loading model from checkpoint: {path}')
        model = MonoDepthModule.load_from_checkpoint(path, cfg=cfg, strict=True)
    else:
        model = MonoDepthModule(cfg)
    resume_path = None
    if cfg['trainer'].get('resume_training'):
        LOGGER.info('Resuming training...')
        if (path := Path(cb_ckpt.dirpath, 'last.ckpt')).is_file():
            resume_path = path
        else:
            LOGGER.warning(f'No previous checkpoint found in "{path.parent}". Beginning training from scratch...')
    # Debug overrides: tiny run that exercises the full loop quickly.
    num_batches = 10
    max_epochs = 10
    trainer = pl.Trainer(devices=args.gpus, accelerator='gpu', strategy='auto', max_epochs=max_epochs, limit_train_batches=num_batches, limit_val_batches=num_batches, accumulate_grad_batches=cfg['trainer'].get('accumulate_grad_batches', 1), log_every_n_steps=num_batches, use_distributed_sampler=False, benchmark=cfg['trainer'].get('benchmark', False), precision=cfg['trainer'].get('precision', 32), gradient_clip_val=cfg['trainer'].get('gradient_clip_val', None), detect_anomaly=True, logger=logger, callbacks=cbks, enable_model_summary=False, profiler='simple')
    if model.auto_scale_lr:
        # Linear LR scaling with the effective batch size (devices * grad accumulation).
        LOGGER.info(f'Scaling LR "{model.lr}" using "{trainer.num_devices}" devices and accumulate "{trainer.accumulate_grad_batches}" batches...')
        model.lr *= (trainer.num_devices * trainer.accumulate_grad_batches)
    trainer.fit(model, ckpt_path=resume_path)
|
def _num_pix(shape: ty.S[int]) -> int:
    'Return the number of elements in a 2D image.'
    assert (len(shape) == 2)
    h, w = shape
    return h * w
|
def _find_closest_multiple(i: ty.U[(int, float)], n: int=32) -> int:
    'Return the closest multiple of `n` wrt the input `i`.'
    return n * round(i / n)
|
@torch.no_grad()
def aspect_ratio_aug(batch: ty.BatchData, p: float=1.0, crop_min: float=0.5, crop_max: float=1.0, ref_shape: ty.N[ty.S[int]]=None) -> ty.BatchData:
    """Augmentation to change the aspect ratio of the input images.

    NOTE: Augmentation happens in-place!
    NOTE: If available, ground-truth depth maps are also resized. This is questionable and results in unreliable metrics.

    The augmentation consists of a crop augmentation followed by a resize. The crop augmentation extracts a centre
    crop from the images based on a sampled aspect ratio (see `RATIOS`). At least one of the dimensions (height or
    width) is guaranteed to be between [crop_min, crop_max] of the original image size. The resize augmentation adjusts
    the resolution of the extracted crop such that it has equal or fewer pixels than `ref_shape`.

    :param batch: (BatchData) Input training batch.
    :param p: (float) Probability of applying the augmentation [0, 1].
    :param crop_min: (float) Minimum relative size of the sampled crop [0, 1].
    :param crop_max: (float) Maximum relative size of the sampled crop [0, 1].
    :param ref_shape: (None|(int, int)) Reference shape to determine optimal resize. If `None` use original image shape.
    :return: (BatchData) Augmented training batch. Note that modification happens in-place.
    """
    cur_shape = batch[0]['imgs'].shape[-2:]
    if random.random() > p:
        # Skipped the aug: still resize to the reference shape if it differs.
        if ref_shape and tuple(ref_shape) != tuple(cur_shape):
            return resize_aug(batch, ref_shape, eps=1)
        return batch
    target_shape = ref_shape or cur_shape
    batch = crop_aug(batch, min=crop_min, max=crop_max)
    return resize_aug(batch, ref_shape=target_shape, eps=0.8)
|
def crop_aug(batch: ty.BatchData, min: float=0.5, max: float=1.0) -> ty.BatchData:
    """Apply a centre crop with a random aspect ratio.

    :param batch: (BatchData) Input training batch.
    :param min: (float) Minimum relative size of the sampled crop [0, 1].
    :param max: (float) Maximum relative size of the sampled crop [0, 1].
    :return: (BatchData) Augmented training batch. Note that modification happens in-place.
    """
    x, y, m = batch
    in_shape = x['imgs'].shape[-2:]
    crop_shape, ratio = sample_crop(in_shape, min, max)
    crop = partial(KT.center_crop, size=crop_shape, mode='bilinear', align_corners=False)
    if 'augs' not in m:
        m['augs'] = []
    m['augs'].append(f'{list(in_shape)} -> {crop_shape} -> {RATIO2LABEL[ratio]}')
    n, b = x['supp_imgs'].shape[:2]
    # Crop all image tensors in a single batched call, then split back out.
    stacked = torch.cat((x['imgs'], y['imgs'], x['supp_imgs'].flatten(0, 1), y['supp_imgs'].flatten(0, 1)))
    x['imgs'], y['imgs'], x['supp_imgs'], y['supp_imgs'] = crop(stacked).split((b, b, n * b, n * b), dim=0)
    x['supp_imgs'] = x['supp_imgs'].unflatten(dim=0, sizes=(n, b))
    y['supp_imgs'] = y['supp_imgs'].unflatten(dim=0, sizes=(n, b))
    if 'depth' in y:
        y['depth'] = crop(y['depth'])
    if 'depth_hints' in y:
        y['depth_hints'] = crop(y['depth_hints'])
    if 'K' in y:
        # Keep the camera intrinsics consistent with the cropped images.
        y['K'] = geo.centre_crop_K(y['K'], crop_shape, in_shape)
    return x, y, m
|
def sample_crop(shape: ty.S[int], min: float=0.5, max: float=1.0) -> tuple[(ty.S[int], float)]:
    """Randomly sample a centre crop with a new aspect ratio.

    NOTE: In practice, we only guarantee that one of the dimensions will be between [min, max]. This is done to allow
    for additional flexibility when sampling aspect ratios that are very different from the original image.

    The general approach is to sample a random aspect ratio and coordinate for one of the dimensions (h or w). In order
    to ensure we have a uniform distribution of aspect ratios (independent of the original shape) we sample `n`
    possible crops with the same aspect ratio starting from both height and width. The final crop is randomly sampled
    from the resulting valid crops.

    :param shape: (int, int) Shape of the input image.
    :param min: (float) Minimum crop size [0, 1].
    :param max: (float) Maximum crop size [0, 1].
    :return: ((int, int), float) The sampled crop size and corresponding aspect ratio.
    """
    assert max >= min
    n = 10
    # Candidate sizes for each dimension, sampled independently.
    hs = torch.randint(int(shape[0] * min), int(shape[0] * max), (n,))
    ws = torch.randint(int(shape[1] * min), int(shape[1] * max), (n,))
    r = random.choice(RATIOS)
    # Derive the complementary dimension from the ratio, starting from both
    # height and width. NOTE: must be a simultaneous assignment — both sides
    # use the *original* hs/ws.
    hs, ws = torch.cat((hs, (ws / r).long())), torch.cat(((r * hs).long(), ws))
    # Keep only crops that fit inside the original image.
    valid = (hs >= 0) & (hs <= shape[0]) & (ws >= 0) & (ws <= shape[1])
    idx = random.choice(valid.nonzero().squeeze())
    return (hs[idx].item(), ws[idx].item()), r
|
def resize_aug(batch: ty.BatchData, ref_shape: ty.S[int], eps: float=0.8) -> ty.BatchData:
    """Apply a resize augmentation to match the number of pixels in `ref_shape`.

    NOTE: Resizing depth maps (especially sparse LiDAR) is questionable and will likely lead to unreliable metrics.

    :param batch: (BatchData) Input training batch.
    :param ref_shape: (int, int) Reference shape to match the number of pixels.
    :param eps: (float) Max percentage of ref_shape pixels to keep [0, 1].
    :return: (BatchData) Augmented training batch. Note that modification happens in-place.
    """
    x, y, m = batch
    cur_shape = x['imgs'].shape[-2:]
    res_shape = sample_resize(cur_shape, ref_shape, eps=eps)
    resize = partial(F.interpolate, size=res_shape, mode='bilinear', align_corners=False)
    if 'augs' not in m:
        m['augs'] = []
    m['augs'].append(str(res_shape))
    n, b = x['supp_imgs'].shape[:2]
    # Resize all image tensors in a single batched call, then split back out.
    stacked = torch.cat((x['imgs'], y['imgs'], x['supp_imgs'].flatten(0, 1), y['supp_imgs'].flatten(0, 1)))
    x['imgs'], y['imgs'], x['supp_imgs'], y['supp_imgs'] = resize(stacked).split((b, b, n * b, n * b), dim=0)
    x['supp_imgs'] = x['supp_imgs'].unflatten(dim=0, sizes=(n, b))
    y['supp_imgs'] = y['supp_imgs'].unflatten(dim=0, sizes=(n, b))
    if 'depth' in y:
        y['depth'] = resize(y['depth'])
    if 'depth_hints' in y:
        raise RuntimeError('Geometric augmentation should not be combined with depth hints... Interpolating depth is not well defined.')
    if 'K' in y:
        # Keep the camera intrinsics consistent with the resized images.
        y['K'] = geo.resize_K(y['K'], res_shape, shape=cur_shape)
    return x, y, m
|
def sample_resize(shape: ty.S[int], ref_shape: ty.S[int], eps: float=0.8) -> ty.S[int]:
    """Sample the resize shape for the new aspect ratio that provides the same number of pixels as `ref_shape`.

    NOTE: Sampled shape will always be a multiple of 32, as required by most networks. This also means the output shape
    will not exactly match the original aspect ratio, but it's close enough.

    :param shape: (int, int) Input image shape.
    :param ref_shape: (int, int) Reference shape to match number of pixels to.
    :param eps: (float) Max percentage of ref_shape pixels to keep [0, 1].
    :return: (int, int) Sampled resize shape for the input image.
    """
    mul = 32
    n, n_ref = _num_pix(shape), _num_pix(ref_shape)
    # Isotropic scale factor that would exactly match the reference pixel count.
    r = (n_ref / n) ** 0.5
    res_shape = [_find_closest_multiple(r * d, n=mul) for d in shape]
    # Rounding up may overshoot the pixel budget; shrink until within it.
    while _num_pix(res_shape) > n_ref * eps:
        res_shape = [d - mul for d in res_shape]
    return res_shape
|
class BaseDataset(ABC, Dataset):
    """Base dataset class that all others should inherit from.

    Provides a common structure and data format, plus convenience automation for the
    boring parts. Each dataset item is returned as three dicts:
        - x: Inputs to the network (typically `imgs`).
        - y: Additional data required for loss computation (e.g. `labels`) or logging.
        - m: Metadata for the given item, typically for logging.

    `BaseDataset` automatically adds the following fields to `m`:
        - items: Item number (i.e. the argument to `__getitem__`).
        - items_original: If `randomize`, the originally requested item number.
        - errors: If class-level `retry_exc` is set and NOT `silent`, caught exception messages.
        - augs: If `use_aug`, the child class should record the augmentations performed.
        - timer_data: If `log_time`, timing information for the current item.

    Loading pipeline (`__getitem__`): Load -> Augment -> Transform -> ToTorch.
    Loading is datum-driven: for each datatype `d` in `datum` the child class must
    implement `load_<d>` and list `d` in the class attribute `VALID_DATUM`. The child
    must also populate `items_data` (per-item loading data), which backs the default
    `num_items` implementation. `__len__` is final and applies `max_len` on top of it.

    Batch collating: forward `collate_fn` when creating a DataLoader:
        >>> ds = MyCustomDataset(mode='train', datum=['image', 'label'])
        >>> dl = DataLoader(ds, batch_size=4, num_workers=4, collate_fn=ds.collate_fn)

    Automatic retrying of failed items is configured per class (not per instance):
        >>> class MyCustomDataset(BaseDataset, retry_exc=(FileNotFoundError, KeyError), max_retries=10): pass

    :param datum: (list[str]) Datatypes to load.
    :param use_aug: (bool) If `True`, call `self.augment` during `__getitem__`.
    :param as_torch: (bool) If `True`, convert (x, y, m) to torch.
    :param max_len: (None|int) Max number of items to load. Combine with `randomize` to limit epoch duration.
    :param randomize: (bool) If `True`, randomize the item number when loading.
    :param log_time: (bool) If `True`, log time taken to load/augment each item.
    """
    # Per-class guard so `validated_init`/`retry_new_on_error` wrap each concrete class only once.
    _tagged = False

    def __init__(self, datum: ty.S[str]=None, use_aug: bool=False, as_torch: bool=True, max_len: ty.N[int]=None, randomize: bool=False, log_time: bool=True):
        self.datum = (datum or [])
        self.as_torch = as_torch
        self.use_aug = use_aug
        self.log_time = log_time
        self.max_len = max_len
        self.randomize = randomize
        self.items_data = None  # Child classes MUST populate this (enforced by `validate_args`).
        # Allow `VALID_DATUM`/`datum` to be given as whitespace-separated strings.
        if isinstance(self.VALID_DATUM, str):
            self.VALID_DATUM = set(self.VALID_DATUM.split())
        if isinstance(self.datum, str):
            self.datum = self.datum.split()
        # When timing is disabled, `nullcontext` (the class itself) still supports `self.timer(name)`
        # as a no-op context manager, so timing blocks need no `log_time` guards.
        self.timer = (MultiLevelTimer(name=self.__class__.__qualname__, as_ms=True, precision=4) if self.log_time else nullcontext)

    def __init_subclass__(cls, retry_exc: ty.N[ty.U[(Exception, tuple[Exception])]]=None, silent: bool=False, max_retries: int=10, use_blacklist: bool=False, **kwargs):
        """Subclass initializer to create the logger and wrap `__init__` and `__getitem__`.

        :param retry_exc: (None|Exception|tuple[Exception]) Exceptions to ignore and retry a different item.
        :param silent: (bool) If `False`, log error info to `meta`.
        :param max_retries: (None|int) Maximum number of retries for a single item.
        :param use_blacklist: (bool) If `True`, keep a list of items to avoid.
        :param kwargs: (dict) Kwargs required by parent classes (typically none required).
        """
        super().__init_subclass__(**kwargs)
        cls.logger = get_logger(f'BaseDataset.{cls.__qualname__}')
        # `delegates` merges the parent `__init__` signature into the child's (for `**kwargs` expansion).
        cls.__init__ = delegates(cls.__base__.__init__)(cls.__init__)
        # Only wrap concrete classes, and only once: subclasses of an already-wrapped concrete
        # class inherit `_tagged = True` and are not re-wrapped.
        if ((not inspect.isabstract(cls)) and (not cls._tagged)):
            cls._tagged = True
            cls.__init__ = validated_init(cls.__init__)
            cls.__getitem__ = retry_new_on_error(cls.__getitem__, exc=retry_exc, silent=silent, max=max_retries, use_blacklist=use_blacklist)

    def __repr__(self) -> str:
        """String representation containing the parameters required to re-create the dataset."""
        sig = inspect.signature(self.__init__)
        # Only report init parameters that were stored as attributes of the same name.
        kw = {k: getattr(self, k) for k in sig.parameters if hasattr(self, k)}
        kw = ', '.join((f'{k}={v}' for (k, v) in kw.items()))
        return f'{self.__class__.__qualname__}({kw})'

    def validate_args(self) -> None:
        """Error checking for the provided dataset configuration. Child overrides should call `super()`."""
        if (self.__class__.__len__ is not BaseDataset.__len__):
            raise TypeError('Child datasets of `BaseDataset` should not override the `__len__` method, as this is used to implement the `max_len` feature. Classes should instead override the default implementation of `num_items` if required!')
        if (self.items_data is None):
            raise NotImplementedError('Dataset must provide `items_data` to use for loading...')
        if (not self.datum):
            raise ValueError('Must provide `datum` to load!')
        ds = [d for d in self.datum if (not self.is_valid(d))]
        if any(ds):
            raise ValueError(f'Invalid data types. ({ds} vs. {self.VALID_DATUM})')
        # Each requested datatype must have a matching `load_<d>` method.
        fns = [f for d in self.datum if (not hasattr(self, (f := f'load_{d}')))]
        if any(fns):
            raise NotImplementedError(f'Missing data loading functions: {fns}')

    def log_args(self) -> None:
        """Log creation arguments. Extend in child classes if required."""
        self.logger.debug(f'Loading datum: {self.datum}...')
        if self.use_aug:
            self.logger.debug('Applying dataset augmentations...')
        if self.log_time:
            self.logger.debug('Logging dataset loading times...')
        if self.max_len:
            self.logger.debug(f'Restricting dataset to {self.max_len} items...')
        if self.randomize:
            self.logger.debug('Randomizing dataset item number loading...')

    @classmethod
    def collate_fn(cls, batch: ty.S[ty.BatchData]) -> ty.BatchData:
        """Classmethod to collate multiple dataset items into a batch. Uses the PyTorch default collating."""
        # Collate x, y and m separately (each is a tuple of per-item dicts after `zip`).
        batch = io.tmap(default_collate, zip(*batch))
        return batch

    @ty.final
    def __len__(self) -> int:
        """Number of dataset items, adjusted by `max_len`. Do not modify!"""
        return (min(self.num_items(), self.max_len) if self.max_len else self.num_items())

    def num_items(self) -> int:
        """Raw number of dataset items, based on `items_data`. Override if required."""
        return len(self.items_data)

    @property
    @abstractmethod
    def VALID_DATUM(self) -> ty.U[(str, set)]:
        """Set of valid datatypes that can be loaded. Must be provided as a class attribute."""

    def is_valid(self, data_type: str) -> bool:
        """Helper to determine if the provided datatype is legal for this dataset."""
        return (data_type in self.VALID_DATUM)

    def has(self, data_type: str) -> bool:
        """Helper to check if a datatype should be loaded by this dataset. Raises for unknown types."""
        if (not self.is_valid(data_type)):
            raise ValueError(f'Invalid data type. ({data_type} vs. {self.VALID_DATUM})')
        return (data_type in self.datum)

    def get_load_fn(self, data_type: str) -> ty.Callable:
        """Retrieve the corresponding `load_<data_type>` function for the provided datatype."""
        if (not self.is_valid(data_type)):
            raise ValueError(f'Invalid data type. ({data_type} vs. {self.VALID_DATUM})')
        return getattr(self, f'load_{data_type}')

    def __getitem__(self, item: int) -> ty.BatchData:
        """Dataset item loading pipeline as (Load -> Augment -> Transform -> ToTorch). Do not modify!"""
        if (item >= len(self)):
            raise IndexError
        if self.randomize:
            # NOTE: samples from `num_items()` (the full dataset), not `len(self)`, so the whole
            # dataset is reachable even when `max_len` restricts the epoch length.
            (iitem, item) = (item, torch.randint(self.num_items(), ()).item())
            self.logger.debug(f'Randomized {iitem} into {item}...')
        self.logger.debug(f'Loading item {item}...')
        batch = (x, y, m) = ({}, {}, {'items': str(item)})
        if self.randomize:
            m['items_original'] = str(iitem)
        if self.use_aug:
            m['augs'] = ''
        with self.timer('Total'):
            with self.timer('Load'):
                batch = self.load(item, batch)
            if self.use_aug:
                with self.timer('Augment'):
                    batch = self.augment(batch)
            with self.timer('Transform'):
                batch = self.transform(batch)
            if self.as_torch:
                with self.timer('ToTorch'):
                    (x, y, m) = batch = self.to_torch(batch)
        if self.log_time:
            # Store a snapshot of the timings, then reset for the next item.
            m['timer_data'] = self.timer.copy()
            self.logger.debug(str(self.timer))
            self.timer.reset()
        return batch

    def load(self, item: int, batch: ty.BatchData) -> ty.BatchData:
        """Load a single dataset item and arrange into (x, y, m) dicts. Should not require overriding."""
        data = self.items_data[item]
        batch = self.add_metadata(data, batch)
        for d in self.datum:
            self.logger.debug(f'Loading "{d}"...')
            with self.timer(d.capitalize()):
                batch = self.get_load_fn(d)(data, batch)
        return batch

    def add_metadata(self, data: ty.Any, batch: ty.BatchData) -> ty.BatchData:
        """Add required item metadata based on the item data. Override if required."""
        return batch

    def augment(self, batch: ty.BatchData) -> ty.BatchData:
        """Augment a dataset item. Override if required."""
        return batch

    def transform(self, batch: ty.BatchData) -> ty.BatchData:
        """Fixed dataset item transforms to apply. Override if required."""
        return batch

    def to_torch(self, batch: ty.BatchData) -> ty.BatchData:
        """Convert dataset to torch Tensors. Should not require overriding."""
        return ops.to_torch(batch)

    def create_axs(self) -> ty.Axes:
        """Create the matplotlib axes required for displaying. Override if required."""
        (_, ax) = plt.subplots()
        return ax

    @abstractmethod
    def show(self, batch: ty.BatchData, axs: ty.Axes) -> None:
        """Show a single dataset item."""

    def play(self, fps: float=30, skip: int=1, reverse: bool=False, fullscreen: bool=False, axs: ty.N[ty.Axes]=None, title: ty.N[ty.Callable[([int, ty.BatchData], str)]]=None) -> None:
        """Iterate through the dataset and display each item.

        :param fps: (int) Frames per second. (Likely not required).
        :param skip: (int) Gap between items.
        :param reverse: (bool) If `True`, iterate though items in reverse order.
        :param fullscreen: (bool) If `True`, make the figure fullscreen.
        :param axs: (None|Axes) Axes to display items on.
        :param title: (None|Callable) Optional function that accepts the item number and batch to return a title.
        :return:
        """
        if self.as_torch:
            raise ValueError('Dataset must not be in torch format when playing.')
        axs = (self.create_axs() if (axs is None) else axs)
        fig = plt.gcf()
        if fullscreen:
            fig.canvas.manager.full_screen_toggle()
        if (title is None):
            title = (lambda i, b: str(i))
        items = (range((len(self) - 1), 0, (- skip)) if reverse else range(0, len(self), skip))
        for i in tqdm(items):
            # Clear either the single axis or every axis in the grid before redrawing.
            (axs.cla() if isinstance(axs, plt.Axes) else [ax.cla() for ax in axs.flatten()])
            batch = self[i]
            self.show(batch, axs)
            fig.suptitle(title(i, batch))
            plt.pause((1 / fps))
        plt.show(block=False)
|
class MdeBaseDataset(BaseDataset, retry_exc=ty.SuppImageNotFoundError):
    """Base class for Monocular Depth Estimation datasets.

    See the documentation from `BaseDataset` for additional information.

    Assumes most datasets provide:
        - image: Target image from which to predict depth.
        - support: Adjacent frames (monocular or stereo) used for photometric consistency losses.
        - depth: Target ground-truth depth.
        - K: Camera intrinsic parameters.

    Datatypes can be added/removed by child classes via the class attribute `VALID_DATUM`.
    All child classes must provide a class attribute `SHAPE` with the original image resolution (H, W).

    The `load_<datatype>` methods load an item component and store it under the expected key
    (images in `(x, y)`, ground-truth depth only in `y`, ...). Interaction with the dataset
    devkit is isolated in the `_load_<datatype>` methods, which subclasses must implement.

    :param shape: (int, int) Target image shape as (h, w).
    :param datum: (list[str]) List of datatypes to load.
    :param supp_idxs: (None|int|list[int]) Support frame indexes relative to the target frame.
    :param randomize_supp: (bool) If `True`, randomize the support index per item (same for all support frames).
    :param augmentations: (None|dict) Per-augmentation probabilities (keys: 'flip', 'photo').

    Attributes:
        :attr h, w, size: (int) Target image height/width and (w, h) size.
        :attr SHAPE, H, W, SIZE: (REQUIRED) Original image (H, W) shape, height, width and (W, H) size.
        :attr prob_flip: (float) Probability to apply the horizontal flipping augmentation.
        :attr prob_photo: (float) Probability to apply the photometric jittering augmentation.
    """
    VALID_DATUM = 'image support depth K'

    def __init__(self, shape: tuple[(int, int)]=None, datum: ty.U[(str, ty.S[str])]='image K', supp_idxs: ty.N[ty.U[(int, ty.S[int])]]=None, randomize_supp: bool=False, augmentations=None, **kwargs):
        super().__init__(datum=datum, **kwargs)
        self.shape = (shape or self.SHAPE)
        self.supp_idxs = (supp_idxs or [])
        self.randomize_supp = randomize_supp
        self.should_resize = (shape is not None)  # Only resize when an explicit target shape was given.
        self.augmentations = (augmentations or {})
        if isinstance(self.supp_idxs, int):
            self.supp_idxs = [self.supp_idxs]
        # Default probabilities are 0.5 ONLY when no `augmentations` dict was given;
        # an explicit dict disables any augmentation it does not mention.
        self.prob_flip = self.augmentations.get('flip', (0 if self.augmentations else 0.5))
        self.prob_photo = self.augmentations.get('photo', (0 if self.augmentations else 0.5))
        # `same_on_batch=True` applies identical jitter to target and support frames.
        self.photo = ka.ColorJiggle(brightness=(0.8, 1.2), contrast=(0.8, 1.2), saturation=(0.8, 1.2), hue=((- 0.1), 0.1), p=1.0, same_on_batch=True, keepdim=True)
        # `np.flip` returns a view; copy to contiguous memory so later torch conversion is safe.
        self.flip = (lambda arr, axis=1: np.ascontiguousarray(np.flip(arr, axis=axis)))

    @property
    def h(self) -> int:
        """Target image height."""
        return self.shape[0]

    @property
    def w(self) -> int:
        """Target image width."""
        return self.shape[1]

    @property
    def size(self) -> tuple[(int, int)]:
        """Target image size as (w, h)."""
        return (self.w, self.h)

    @property
    @abstractmethod
    def SHAPE(self) -> tuple[(int, int)]:
        """Original image shape as (H, W). Must be provided as a class attribute."""

    @property
    def H(self) -> int:
        """Original image height."""
        return self.SHAPE[0]

    @property
    def W(self) -> int:
        """Original image width."""
        return self.SHAPE[1]

    @property
    def SIZE(self) -> tuple[(int, int)]:
        """Original image size as (W, H)."""
        return (self.W, self.H)

    def log_args(self) -> None:
        """Log creation arguments."""
        self.logger.info(f"Loading images with shape '{self.shape}'...")
        if self.supp_idxs:
            self.logger.debug(f"Logging '{self.supp_idxs}' support frames...")
        if self.randomize_supp:
            self.logger.debug(f'Randomizing index of support frames...')
        super().log_args()

    def validate_args(self) -> None:
        """Error checking for the provided dataset configuration."""
        super().validate_args()
        if (self.h > self.w):
            self.logger.warning(f'Image height={self.h} is greater than image width={self.w}. Did you pass these in the correct order? Expected (height, width).')
        if (self.H > self.W):
            self.logger.warning(f'Image full height={self.H} is greater than image width={self.W}. Did you pass these in the correct order? Expected SHAPE as (Height, Width).')
        for i in self.supp_idxs:
            # When randomizing, indexes act as direction multipliers, so only {-1, 0, +1} make sense.
            if (self.randomize_supp and (abs(i) not in {0, 1})):
                raise ValueError(f'Invalid supplementary index when randomizing. ({i} vs. {{+1, 0, -1}} )')
        if (self.supp_idxs and (not self.has('support'))):
            raise ValueError('Support indexes were provided, but `support` was not found in `datum`.')
        if (self.has('support') and (not self.supp_idxs)):
            raise ValueError('Support images were requested, but no indexes were provided.')

    @classmethod
    def collate_fn(cls, batch: ty.S[ty.BatchData]) -> ty.BatchData:
        """Classmethod to collate multiple dataset items into a batch.

        `x['supp_idxs']` is reduced to a single Tensor, since indexes are the same for all items. (n,)
        `x['supp_imgs']` are transposed so the number of support images is the first dimension. (n, b, ...)
        `y['supp_imgs']` as above. (n, b, ...)

        :param batch: (ty.BatchData) List of dataset items, each with `(x, y, m)`.
        :return: (Dataset) Collated batch, where all items are stacked into a single tensor.
        """
        (x, y, m) = super().collate_fn(batch)
        if ('supp_idxs' in x):
            x['supp_idxs'] = x['supp_idxs'][0]
            x['supp_imgs'] = x['supp_imgs'].transpose(0, 1)
            y['supp_imgs'] = y['supp_imgs'].transpose(0, 1)
        return (x, y, m)

    def load_image(self, data: ty.Any, batch: ty.BatchData) -> ty.BatchData:
        """Load target image and keep an un-augmented copy. (x, y)"""
        (x, y, m) = batch
        img = self._load_image(data)
        x['imgs'] = io.pil2np(img)
        y['imgs'] = x['imgs'].copy()  # `y` keeps the pristine copy; `x` may be augmented later.
        return batch

    @abstractmethod
    def _load_image(self, data: ty.Any, offset: int=0) -> Image:
        """Load target image from dataset. `offset` should be used when loading support frames."""

    def load_support(self, data: ty.Any, batch: ty.BatchData) -> ty.BatchData:
        """Load all support frames (including stereo) and keep un-augmented copies. (x, y)"""
        (x, y, m) = batch
        x['supp_idxs'] = np.array(self.supp_idxs)
        (supp, k) = ([], self.get_supp_scale(data))
        m['supp'] = str(k)
        for i in self.supp_idxs:
            i *= k  # Scale the relative index (e.g. randomized support distance).
            self.logger.debug(f'Loading support image: {i}')
            if (i == 0):
                # Index 0 denotes the stereo pair rather than a temporally adjacent frame.
                supp.append(self._load_stereo_image(data))
                y['T_stereo'] = self._load_stereo_T(data)
            else:
                supp.append(self._load_image(data, offset=i))
        x['supp_imgs'] = np.stack([io.pil2np(img) for img in supp])
        y['supp_imgs'] = x['supp_imgs'].copy()
        return batch

    def get_supp_scale(self, data: ty.Any) -> int:
        """Generate the index multiplier of the support frame relative to the target image. Override if required."""
        return 1

    @abstractmethod
    def _load_stereo_image(self, data: ty.Any) -> Image:
        """Load the support stereo frame from dataset."""

    @abstractmethod
    def _load_stereo_T(self, data: ty.Any) -> ty.A:
        """Load the stereo transform to the stereo frame from dataset."""

    def load_depth(self, data: ty.Any, batch: ty.BatchData) -> ty.BatchData:
        """Load ground-truth depth and store in loss targets. (y)"""
        batch[1]['depth'] = self._load_depth(data)
        return batch

    @abstractmethod
    def _load_depth(self, data: ty.Any) -> ty.A:
        """Load ground-truth depth from dataset."""

    def load_K(self, data: ty.Any, batch: ty.BatchData) -> ty.BatchData:
        """Load camera intrinsics and store in loss targets. (y)"""
        batch[1]['K'] = self._load_K(data)
        return batch

    @abstractmethod
    def _load_K(self, data: ty.Any) -> ty.A:
        """Load camera intrinsics from dataset."""

    def augment(self, batch: ty.BatchData) -> ty.BatchData:
        """Augment a dataset item. Currently supported: horizontal flipping and colour jittering."""
        if (random.random() <= self.prob_flip):
            batch = self.apply_flip_aug(batch)
        if (random.random() <= self.prob_photo):
            batch = self.apply_photo_aug(batch)
        return batch

    def apply_flip_aug(self, batch: ty.BatchData) -> ty.BatchData:
        """Apply horizontal flipping augmentation. All images are flipped, including the "non-augmented" copies in `y`."""
        (x, y, m) = batch
        self.logger.debug('Triggered Augmentation: Horizontal flip')
        m['augs'] += '[FlipLR]'
        (x['imgs'], y['imgs']) = (self.flip(x['imgs']), self.flip(y['imgs']))
        if self.supp_idxs:
            # Support frames are stacked (n, H, W, C), so the width axis is -2 rather than 1.
            (x['supp_imgs'], y['supp_imgs']) = (self.flip(x['supp_imgs'], axis=(- 2)), self.flip(y['supp_imgs'], axis=(- 2)))
        if ('T_stereo' in y):
            # Mirroring the image negates the horizontal component of the stereo translation.
            y['T_stereo'][(0, 3)] *= (- 1)
        if ('depth' in y):
            y['depth'] = self.flip(y['depth'])
        return batch

    def apply_photo_aug(self, batch: ty.BatchData) -> ty.BatchData:
        """Apply colour jittering augmentation to `x`. The same jittering is applied to target and support."""
        (x, y, m) = batch
        self.logger.debug('Triggered Augmentation: Photometric')
        m['augs'] += '[Photo]'
        # Stack target + support so a single jitter call treats them identically.
        imgs = x['imgs'][None]
        if self.supp_idxs:
            imgs = np.concatenate((imgs, x['supp_imgs']))
        imgs = ops.to_np(self.photo(ops.to_torch(imgs)))
        x['imgs'] = imgs[0]
        if self.supp_idxs:
            x['supp_imgs'] = imgs[1:]
        return batch

    def transform(self, batch: ty.BatchData) -> ty.BatchData:
        """Apply ImageNet standardization to `x`."""
        x = batch[0]
        x['imgs'] = ops.standardize(x['imgs'])
        if self.supp_idxs:
            x['supp_imgs'] = ops.standardize(x['supp_imgs'])
        return batch

    def create_axs(self) -> ty.Axes:
        """Create the axes required for displaying: target + support frames + (optionally) depth."""
        (_, axs) = plt.subplots(((1 + len(self.supp_idxs)) + ('depth' in self.datum)))
        if isinstance(axs, plt.Axes):
            axs = np.array([axs])  # Normalize a single axis to an array for uniform indexing.
        plt.tight_layout()
        return axs

    def show(self, batch: ty.BatchData, axs: ty.Axes) -> None:
        """Show a single dataset item."""
        (x, y, m) = batch
        # NOTE(review): hard-coded — always displays the augmented images from `x` (un-standardized).
        use_aug = True
        d = (x if use_aug else y)
        i = 0
        axs[i].imshow((ops.unstandardize(d['imgs']) if use_aug else d['imgs']))
        if self.supp_idxs:
            for (ax, im) in zip(axs[1:], d['supp_imgs']):
                i += 1
                ax.imshow((ops.unstandardize(im) if use_aug else im))
        if ('depth' in y):
            i += 1
            axs[i].imshow(viz.rgb_from_disp(y['depth'], invert=True))
|
@register('ddad')
class DdadDataset(MdeBaseDataset):
    """DDAD Dataset. From: https://arxiv.org/abs/1905.02693.

    A simple wrapper over the official `SynchronizedSceneDataset` provided by the DGP repo
    (https://github.com/TRI-ML/dgp, downloaded to `/PATH/TO/ROOT/src/external_libs/dgp`).

    The current implementation focuses exclusively on using DDAD as a testing dataset and
    should not be used for training. By default, the dataset is limited to the first 1000 items.

    Datum:
        image: Target image from which to predict depth.
        depth: Target ground-truth Lidar depth.
        K: Camera intrinsic parameters.

    Batch:
        x: {imgs: (Tensor) (3, h, w) Augmented target image.}
        y: {
            imgs: (Tensor) (3, h, w) Non-augmented target image.
            depth: (Tensor) (1, h, w) Ground-truth target depth.
            K: (Tensor) (4, 4) Camera intrinsics.
        }
        m: {}

    :param mode: (str) Dataset split mode to load. {val}
    :param kwargs: (dict) Kwargs accepted by `BaseDataset` and `MdeBaseDataset`.
    """
    VALID_DATUM = 'image depth K'
    SHAPE = (1216, 1936)

    def __init__(self, mode: str, **kwargs):
        super().__init__(**kwargs)
        self.mode = mode
        # DDAD is eval-only: cap the dataset at its first 1000 items by default, while
        # honouring a stricter user-provided `max_len` (previously it was clobbered unconditionally).
        self.max_len = min(self.max_len, 1000) if self.max_len else 1000
        (self.split_file, self.items_data) = self.parse_items()

    def log_args(self) -> None:
        """Log creation arguments."""
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        """Error checking for the provided dataset configuration."""
        # Parent `validate_args` calls `self.has('support')`, which raises for datatypes not in
        # `VALID_DATUM` — temporarily register it so the parent checks can run, then restore.
        self.VALID_DATUM.add('support')
        super().validate_args()
        self.VALID_DATUM.remove('support')
        if (self.mode != 'val'):
            raise ValueError('DDAD is a testing dataset. Only a `val` split is provided.')
        if self.use_aug:
            raise ValueError('DDAD is a testing dataset, no augmentations should be applied.')
        if self.supp_idxs:
            raise ValueError('DDAD does not provide support frames.')
        if self.randomize_supp:
            raise ValueError('DDAD does not provide support frames.')

    def parse_items(self) -> tuple[(Path, ddad.SynchronizedSceneDataset)]:
        """Build the DGP dataset for the requested split; only request LiDAR when depth is loaded."""
        file = ddad.get_json_file()
        datum = (['camera_01'] + (['lidar'] if ('depth' in self.datum) else []))
        ds = ddad.get_dataset(self.mode, datum=datum)
        return (file, ds)

    def _load_image(self, data: ty.Any, offset: int=0) -> Image:
        """Load the target image from the DGP item, optionally resized to the target shape."""
        img = data[0][0]['rgb']
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img

    def _load_depth(self, data: ty.Any) -> ty.A:
        """Load the ground-truth LiDAR depth from the DGP item."""
        depth = data[0][0]['depth']
        return depth

    def _load_K(self, data: ty.Any) -> ty.A:
        """Load the camera intrinsics, padded to (4, 4) and rescaled to the target shape."""
        K = geo.pad_K(data[0][0]['intrinsics'])
        if self.should_resize:
            K = geo.resize_K(K, self.shape, self.SHAPE)
        return K

    def get_supp_scale(self, data: ty.Any) -> ty.A:
        raise NotImplementedError('DDAD does not provide support frames.')

    def _load_stereo_image(self, data: ty.Any) -> ty.A:
        raise NotImplementedError('DDAD does not provide stereo frames.')

    def _load_stereo_T(self, data: ty.Any) -> ty.A:
        raise NotImplementedError('DDAD does not provide stereo frames.')
|
def validated_init(__init__: ty.Callable):
    """Decorator ensuring a BaseDataset child always runs argument validation after init.

    The wrapped `__init__` logs the class being created, runs the original initializer,
    then calls `log_args` and `validate_args` on the instance.
    """
    @wraps(__init__)
    def _checked_init(self, *args, **kwargs) -> None:
        cls_name = self.__class__.__qualname__
        self.logger.info(f"Creating '{cls_name}'...")
        __init__(self, *args, **kwargs)
        # Validation runs last so all attributes assigned during init are available.
        self.log_args()
        self.validate_args()
    return _checked_init
|
@opt_args_deco
def retry_new_on_error(__getitem__: ty.Callable, exc: ty.U[(BaseException, ty.S[BaseException])]=Exception, silent: bool=False, max: ty.N[int]=None, use_blacklist: bool=False) -> ty.Callable:
    """Decorator wrapping a BaseDataset `__getitem__` to retry a different item on error.

    The idea is to provide a way of ignoring missing/corrupt data without having to blacklist
    files, change number of items or do "hacky" workarounds. Obviously, the less data we have,
    the less sense this decorator makes, since we'll start duplicating more and more items
    (although if we're augmenting our data, it shouldn't be too tragic). Likewise, for
    debugging/evaluation it probably makes more sense to disable this decorator.

    NOTE: This decorator assumes we follow the BaseDataset format:
        - We return three dicts (x, y, m).
        - Errors are logged in m['errors'].
        - A `log_time` flag indicates the presence of a `MultiLevelTimer` in `self.timer`.

    NOTE(review): `n` and `blacklist` are closure state created once per decorated CLASS, i.e.
    shared by all instances of that class within a process.

    :param __getitem__: (ty.Callable) Dataset `__getitem__` method to decorate.
    :param exc: (Exception|tuple[Exception]) Expected exceptions to catch and retry on.
    :param silent: (bool) If `False`, log error info to `meta`.
    :param max: (None|int) Maximum number of retries for a single item.
    :param use_blacklist: (bool) If `True`, keep a list of items to avoid.
    :return: (BatchData) Batch returned by `__getitem__`.
    """
    n = 0  # Consecutive-failure counter; reset after any successful load.
    blacklist = set()
    exc = (exc or tuple())  # `exc=None` -> empty tuple -> the `except` clause never matches.
    if isinstance(exc, list):
        exc = tuple(exc)
    @wraps(__getitem__)
    def wrapper(self, item: int) -> ty.BatchData:
        nonlocal n
        try:
            (x, y, m) = __getitem__(self, item)
            if ((not silent) and ('errors' not in m)):
                m['errors'] = ''
        except exc as e:
            n += 1
            if (max and (n >= max)):
                raise RuntimeError('Exceeded max retries when loading dataset item...')
            if use_blacklist:
                blacklist.add(item)
            if self.log_time:
                # Discard partial timings accumulated by the failed attempt.
                self.timer.reset()
            # Pick a fresh random item (never the failed one, never a blacklisted one) and
            # recurse through the wrapper so the retry itself is also protected.
            new = item
            while ((new == item) or (new in blacklist)):
                new = random.randrange(len(self))
            (x, y, m) = wrapper(self, new)
            if (not silent):
                # Append "(item, exception)" for each failure along the retry chain.
                m['errors'] += f"{(' - ' if m['errors'] else '')}{(item, e)}"
        n = 0
        return (x, y, m)
    return wrapper
|
@register('diode')
class DiodeDataset(MdeBaseDataset):
    """DIODE dataset wrapper (testing only — no augmentations, support frames or intrinsics).

    Datum:
        image: Target image from which to predict depth.
        depth: Target ground-truth depth.
        mask: Ground-truth depth validity mask.

    :param scene: (str) Scene split to load — forwarded to the `di.Item` split helpers.
        (presumably e.g. indoor/outdoor; verify against `di.Item`)
    :param mode: (str) Dataset split mode to load.
    :param datum: (str|list[str]) Datatypes to load.
    :param kwargs: (dict) Kwargs accepted by `BaseDataset` and `MdeBaseDataset`.
    """
    VALID_DATUM = 'image depth mask'
    SHAPE = (768, 1024)

    def __init__(self, scene: str, mode: str, datum='image depth mask', **kwargs):
        super().__init__(datum=datum, **kwargs)
        self.scene = scene
        self.mode = mode
        (self.split_file, self.items_data) = self.parse_items()

    def log_args(self):
        """Log creation arguments."""
        self.logger.info(f"Split: '{self.scene}' - Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        """Error checking for the provided dataset configuration."""
        # Parent `validate_args` calls `self.has('support')`, which raises for datatypes not in
        # `VALID_DATUM` — temporarily register it so the parent checks can run, then restore.
        self.VALID_DATUM.add('support')
        super().validate_args()
        self.VALID_DATUM.remove('support')
        if self.use_aug:
            raise ValueError('Diode is a testing dataset, no augmentations should be applied.')
        if self.supp_idxs:
            raise ValueError('Diode does not provide support frames.')
        if self.randomize_supp:
            raise ValueError('Diode does not provide support frames.')

    def parse_items(self) -> tuple[(Path, ty.S[di.Item])]:
        """Load the split file and per-item data for the configured scene/mode."""
        file = di.Item.get_split_file(self.mode, self.scene)
        data = di.Item.load_split(self.mode, self.scene)
        return (file, data)

    def _load_image(self, data: di.Item, offset: int=0) -> Image:
        """Load the target image, optionally resized to the target shape."""
        img = data.load_img()
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img

    def _load_depth(self, data: di.Item) -> ty.A:
        """Load the ground-truth depth from the item."""
        return data.load_depth()

    def load_mask(self, data: di.Item, batch: ty.BatchData) -> ty.BatchData:
        """Load the depth validity mask and store in loss targets. (y)"""
        batch[1]['mask'] = self._load_mask(data)
        return batch

    def _load_mask(self, data: di.Item) -> ty.A:
        """Load the depth validity mask from the item."""
        return data.load_mask()

    def get_supp_scale(self, data: di.Item) -> ty.A:
        raise NotImplementedError('Diode does not provide support frames.')

    def _load_K(self, data: di.Item) -> ty.A:
        raise NotImplementedError('Diode does not provide camera intrinsics.')

    def _load_stereo_image(self, data: di.Item) -> ty.A:
        raise NotImplementedError('Diode does not provide stereo frames.')

    def _load_stereo_T(self, data: di.Item) -> ty.A:
        raise NotImplementedError('Diode does not provide stereo frames.')
|
@register('kitti')
class KittiRawDataset(MdeBaseDataset):
    'Kitti Raw dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (either monocular or stereo) used to compute photometric consistency losses.\n - Depth: Target ground-truth benchmark depth (corrected LiDAR).\n - Depth Velo: Target ground-truth velodyne depth (raw LiDAR).\n - Depth Hint: Hand-crafted fused SGBM depth estimates.\n - K: Camera intrinsic parameters.\n\n See BaseDataset for additional added metadata.\n\n Batch:\n x: {\n imgs: (Tensor) (3, h, w) Augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Augmented support frames.\n supp_idxs: (Tensor) (n,) Indexes of the support frames relative to target.\n }\n\n y: {\n imgs: (Tensor) (3, h, w) Non-augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Non-augmented support frames.\n depth: (Tensor) (1, h, w) Ground-truth target depth (either Benchmark or LiDAR)\n depth_hints: (Tensor) (1, h, w) Fused SGBM depth hints.\n T_stereo: (Tensor) (4, 4) Transform to the stereo support pair.\n K: (Tensor) (4, 4) Camera intrinsics.\n }\n\n m: {\n stem: (str) Path to loaded item.\n supp: (str) Support frame multiplier.\n }\n\n Parameters:\n :param split: (str) Kitti split to use. {eigen, eigen_zhou, eigen_benchmark, odometry...}\n :param mode: (str) Training mode to use. {train, val, test}\n\n Attributes:\n :attr K: (ty.A) (4, 4) Averaged camera intrinsics, normalized based on `shape`.\n :attr split_file: (Path) File containing the list of items in the loaded split.\n :attr items_data: (list[kr.Item]) List of dataset items as (seq, cam, stem).\n '
    VALID_DATUM = 'image support depth depth_velo depth_hint K'
    SHAPE = (376, 1242)
    def __init__(self, split: str, mode: str, **kwargs):
        super().__init__(**kwargs)
        self.split = split
        self.mode = mode
        # Sign of the stereo baseline translation for each camera (left vs right).
        self._cam2sign = {'image_02': (- 1), 'image_03': 1}
        self._side2cam = {'l': 'image_02', 'r': 'image_03'}
        self._cam2stereo = {'image_02': 'image_03', 'image_03': 'image_02'}
        # Averaged normalized Kitti intrinsics, rescaled to the configured `shape`.
        self.K = geo.resize_K(np.array([[0.58, 0, 0.5, 0], [0, 1.92, 0.5, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32), new_shape=self.shape)
        (self.split_file, self.items_data) = self.parse_items()
        # Raw Kitti images vary in size between drives, so always resize.
        self.should_resize = True
    def log_args(self):
        'Log dataset configuration.'
        self.logger.info(f"Split: '{self.split}' - Mode: '{self.mode}'")
        super().log_args()
    def validate_args(self) -> None:
        'Error checking for provided dataset configuration.'
        super().validate_args()
        if (self.has('depth') and self.has('depth_velo')):
            raise ValueError('Must provide only one source of depth. (`depth`: Corrected LiDAR, `depth_velo`: LiDAR)')
    def parse_items(self) -> tuple[(Path, ty.S[kr.Item])]:
        'Helper to parse dataset items.'
        file = kr.get_split_file(self.split, self.mode)
        lines = [line.split() for line in kr.load_split(file)]
        items = [kr.Item(line[0], int(line[1]), self._side2cam[line[2]]) for line in lines]
        return (file, items)
    def add_metadata(self, data: kr.Item, batch: ty.BatchData) -> ty.BatchData:
        'Add item metadata.'
        batch[2]['stem'] = f'{data.seq}/{data.cam}/{data.stem:010}'
        return batch
    def _load_image(self, data: kr.Item, offset: int=0) -> Image:
        'Load target image from dataset. Offset should be used when loading support frames.'
        file = kr.get_image_file(data.seq, data.cam, (data.stem + offset))
        if (not file.is_file()):
            # Missing support frames raise a dedicated error so callers can handle them separately.
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{file}" with "offset={offset!r}"')
        img = Image.open(file)
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img
    def get_supp_scale(self, data: kr.Item) -> int:
        'Generate the index of the support frame relative to the target image.'
        # FIX: the previous implementation branched on `self.randomize_supp`, but
        # both branches returned 1 (dead code). Kitti support frames are always
        # directly adjacent, so a single return suffices.
        return 1
    def _load_stereo_image(self, data: kr.Item) -> Image:
        'Load the support stereo frame from dataset.'
        data = kr.Item(data.seq, data.stem, self._cam2stereo[data.cam])
        img = self._load_image(data)
        return img
    def _load_stereo_T(self, data: kr.Item) -> ty.A:
        'Load the stereo transform to the stereo frame from dataset.'
        T = np.eye(4, dtype=np.float32)
        # Baseline offset of 0.1 with camera-dependent sign.
        # NOTE(review): 0.1 looks like a scaled (monodepth-style) baseline rather
        # than the metric Kitti baseline — confirm the scaling convention.
        T[(0, 3)] = (self._cam2sign[data.cam] * 0.1)
        return T
    def _load_depth(self, data: kr.Item) -> ty.A:
        'Load ground-truth benchmark depth from dataset (corrected LiDAR).'
        file = kr.get_depth_file(data.seq, data.cam, data.stem)
        if (not file.is_file()):
            raise FileNotFoundError(f'Could not find specified depth benchmark file "{file}".')
        depth = kr.load_depth(file)
        if self.should_resize:
            # order=0: nearest-neighbour, so sparse/invalid depth values are not interpolated.
            depth = skit.resize(depth, self.SHAPE, order=0, preserve_range=True, mode='constant')
        return depth
    def load_depth_velo(self, data: kr.Item, batch: ty.BatchData) -> ty.BatchData:
        'Load ground-truth velodyne depth and store in loss targets. (y)'
        batch[1]['depth'] = self._load_depth_velo(data)
        return batch
    def _load_depth_velo(self, data: kr.Item) -> ty.A:
        'Load ground-truth velodyne depth from dataset (raw LiDAR).'
        file = kr.get_velodyne_file(data.seq, data.stem)
        if (not file.is_file()):
            raise FileNotFoundError(f'Could not find specified depth LiDAR file "{file}".')
        seq = data.seq.split('/')[0]
        (cam2cam, _, velo2cam) = kr.load_calib(seq)
        # Cam index is parsed from the trailing digits of e.g. 'image_02'.
        depth = kr.load_depth_velodyne(file, velo2cam, cam2cam, cam=int(data.cam[(- 2):]))
        if self.should_resize:
            depth = skit.resize(depth, self.SHAPE, order=0, preserve_range=True, mode='constant')
        return depth
    def load_depth_hint(self, data: kr.Item, batch: ty.BatchData) -> ty.BatchData:
        'Load fused SGBM depth hints and store in loss targets. (y)'
        batch[1]['depth_hints'] = self._load_depth_hint(data)
        return batch
    def _load_depth_hint(self, data: kr.Item) -> ty.A:
        'Load fused SGBM depth hints from dataset.'
        file = kr.get_hint_file(data.seq, data.cam, data.stem)
        if (not file.is_file()):
            raise FileNotFoundError(f'Could not find specified depth hint file "{file}".')
        depth = np.load(file)
        if self.should_resize:
            depth = cv2.resize(depth, dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth[(..., None)]
    def _load_K(self, data: kr.Item) -> ty.A:
        'Load camera intrinsics from dataset.'
        return self.K
    def apply_flip_aug(self, batch: ty.BatchData) -> ty.BatchData:
        'Apply horizontal flipping augmentation.'
        batch = super().apply_flip_aug(batch)
        y = batch[1]
        # Depth hints are specific to this dataset, so flip them here in addition
        # to whatever the base class handles.
        if ('depth_hints' in y):
            y['depth_hints'] = self.flip(y['depth_hints'])
        return batch
    def create_axs(self) -> ty.Axes:
        'Create axes required for displaying.'
        # One axis for the target, one per support frame, plus optional depth and hints.
        n = (((1 + len(self.supp_idxs)) + (self.has('depth') or self.has('depth_velo'))) + self.has('depth_hint'))
        (_, axs) = plt.subplots(n)
        if isinstance(axs, plt.Axes):
            axs = np.array([axs])
        plt.tight_layout()
        return axs
    def show(self, batch: ty.BatchData, axs: ty.Axes) -> None:
        'Show a single dataset item.'
        super().show(batch, axs)
        y = batch[1]
        if ('depth_hints' in y):
            axs[(- 1)].imshow(viz.rgb_from_disp(y['depth_hints'], invert=True))
|
@register('kitti_lmdb')
class KittiRawLmdbDataset(KittiRawDataset):
    'Kitti Raw dataset using LMDBs. See `KittiRawDataset` for additional details.'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-drive LMDB handles, keyed by 'seq/drive' (optionally '/cam').
        self.image_dbs = {}
        self.depth_dbs = {}
        self.poses_dbs = {}
        self.hints_dbs = {}
        self.calib_dbs = {}
        self.preload()
    # NOTE: `parse_items` is inherited from `KittiRawDataset`; the previous
    # override here was byte-identical and has been removed.
    def preload(self) -> None:
        'Create all LMDBs for the required items.'
        drives = set((item.seq for item in self.items_data))
        for d in drives:
            self.image_dbs[f'{d}/image_02'] = kr.load_images(*d.split('/'), 'image_02')
            self.image_dbs[f'{d}/image_03'] = kr.load_images(*d.split('/'), 'image_03')
        if self.has('depth'):
            for d in drives:
                self.depth_dbs[f'{d}/image_02'] = kr.load_depths(*d.split('/'), 'image_02')
                self.depth_dbs[f'{d}/image_03'] = kr.load_depths(*d.split('/'), 'image_03')
        if self.has('depth_velo'):
            # Velodyne depths require per-sequence calibration, loaded once and shared.
            seqs = set((seq.split('/')[0] for seq in drives))
            self.calib_dbs = {s: kr.load_calib(s) for s in seqs}
            for d in drives:
                (seq, drive) = d.split('/')
                self.depth_dbs[d] = kr.load_velo_depths(seq, drive, self.calib_dbs[seq])
        if self.has('depth_hint'):
            for d in drives:
                self.hints_dbs[f'{d}/image_02'] = kr.load_hints(*d.split('/'), 'image_02')
                self.hints_dbs[f'{d}/image_03'] = kr.load_hints(*d.split('/'), 'image_03')
    def _load_image(self, data: kr.Item, offset: int=0) -> Image:
        'Load target image from dataset. Offset should be used when loading support frames.'
        k = f'{(data.stem + offset):010}'
        kdb = f'{data.seq}/{data.cam}'
        db = self.image_dbs[kdb]
        if (k not in db):
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{kdb}/{k}" with "offset={offset!r}"')
        img = db[k]
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img
    def _load_depth(self, data: kr.Item) -> ty.A:
        'Load ground-truth benchmark depth from dataset (corrected LiDAR).'
        k = f'{data.stem:010}'
        kdb = f'{data.seq}/{data.cam}'
        depth = self.depth_dbs[kdb][k]
        if self.should_resize:
            depth = skit.resize(depth, self.SHAPE, order=0, preserve_range=True, mode='constant')
        return depth[(..., None)]
    def _load_depth_velo(self, data: kr.Item) -> ty.A:
        'Load ground-truth velodyne depth from dataset (raw LiDAR).'
        # Velodyne DBs are keyed per-drive with a (stem, cam) tuple.
        k = (f'{data.stem:010}', int(data.cam[(- 2):]))
        kdb = data.seq
        depth = self.depth_dbs[kdb][k]
        if self.should_resize:
            depth = skit.resize(depth, self.SHAPE, order=0, preserve_range=True, mode='constant')
        return depth[(..., None)]
    def _load_depth_hint(self, data: kr.Item) -> ty.A:
        'Load fused SGBM depth hints from the LMDB.'
        k = f'{data.stem:010}'
        kdb = f'{data.seq}/{data.cam}'
        depth = self.hints_dbs[kdb][k]
        if self.should_resize:
            depth = cv2.resize(depth, dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth[(..., None)]
|
@register('mannequin')
class MannequinDataset(MdeBaseDataset):
    'Mannequin Challenge dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (monocular) used to compute photometric consistency losses.\n - Depth: Target ground-truth COLMAP depth.\n - K: Camera intrinsic parameters.\n\n See BaseDataset for additional added metadata.\n\n Batch:\n x: {\n imgs: (Tensor) (3, h, w) Augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Augmented support frames.\n supp_idxs: (Tensor) (n,) Indexes of the support frames relative to target.\n }\n\n y: {\n imgs: (Tensor) (3, h, w) Non-augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Augmented support frames.\n depth: (Tensor) (1, h, w) Ground-truth target depth.\n K: (Tensor) (4, 4) Camera intrinsics.\n }\n\n m: {\n supp: (str) Support frame multiplier.\n }\n\n Parameters:\n :param mode: (str) Training mode to use. {train, val, test}\n\n Attributes:\n :attr split_file: (Path) File containing the list of items in the loaded split.\n :attr items_data: (list[mc.Item]) List of dataset items as (seq, cam, stem).\n '
    VALID_DATUM = 'image support depth K'
    SHAPE = (1080, 1920)
    def __init__(self, mode: str, **kwargs):
        super().__init__(**kwargs)
        self.mode = mode
        (self.split_file, self.items_data) = self.parse_items()
    def log_args(self):
        'Log dataset configuration.'
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()
    def validate_args(self) -> None:
        'Error checking for provided dataset configuration.'
        super().validate_args()
        if (0 in self.supp_idxs):
            raise ValueError('MannequinChallenge does not provide stereo pairs.')
    def parse_items(self) -> tuple[(Path, ty.S[mc.Item])]:
        'Helper to parse dataset items.'
        return mc.load_split(self.mode)
    def add_metadata(self, data: mc.Item, batch: ty.BatchData) -> ty.BatchData:
        'Add item metadata.'
        batch[2]['seq'] = data.seq
        return batch
    def _load_image(self, data: mc.Item, offset: int=0) -> Image:
        'Load target image from dataset. Offset should be used when loading support frames.'
        # FIX: access the stem via the named field instead of positionally
        # (`data[1]`), matching every other method in this class and the LMDB
        # subclass, which builds the same key from `data.stem`.
        file = mc.get_img_file(mode=self.mode, seq=data.seq, stem=(int(data.stem) + offset))
        if (not file.is_file()):
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{file}" with "offset={offset!r}"')
        img = Image.open(file)
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img
    def get_supp_scale(self, data: mc.Item) -> int:
        'Generate the index of the support frame relative to the target image.'
        if (not self.randomize_supp):
            return 1
        k = random.randint(1, 5)
        return k
    def _load_depth(self, data: mc.Item) -> ty.A:
        'Load ground-truth depth from dataset.'
        # FIX: return annotation corrected from `None` to `ty.A` — an array is returned.
        file = mc.get_depth_file(self.mode, data.seq, data.stem)
        if (not file.is_file()):
            raise FileNotFoundError(f'Could not find specified depth file "{file}".')
        depth = np.load(file)
        if self.should_resize:
            depth = cv2.resize(depth, dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth
    def _load_K(self, data: mc.Item) -> ty.A:
        'Load camera intrinsics from dataset.'
        info = mc.load_info(self.mode, data.seq)[data.stem]
        # NOTE(review): stored shapes appear to be off by one (max index rather
        # than size); +1 recovers the true size — confirm against the info files.
        sh = [(i + 1) for i in info['shape']]
        K = info['K']
        if self.should_resize:
            K = geo.resize_K(K, self.shape, shape=sh)
        return K
    def _load_stereo_image(self, data: mc.Item) -> None:
        raise NotImplementedError('MannequinChallenge does not contain stereo pairs.')
    def _load_stereo_T(self, data: mc.Item) -> None:
        raise NotImplementedError('MannequinChallenge does not contain stereo pairs.')
|
@register('mannequin_lmdb')
class MannequinLmdbDataset(MannequinDataset):
    'Mannequin Challenge dataset using LMDBs. See `MannequinDataset` for additional details.'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.img_db = mc.load_imgs(self.mode)
        # Depth LMDB is only opened when depths are actually requested.
        self.depth_db = (mc.load_depths(self.mode) if self.has('depth') else None)
        self.K_db = mc.load_intrinsics(self.mode)
        self.shape_db = mc.load_shapes(self.mode)
    # NOTE: `parse_items` is inherited from `MannequinDataset`; the previous
    # override here was byte-identical and has been removed.
    def _load_image(self, data: mc.Item, offset: int=0) -> Image:
        'Load target image from dataset. Offset should be used when loading support frames.'
        k = f'{data.seq}/{(int(data.stem) + offset):05}'
        if (k not in self.img_db):
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{k}" with "offset={offset!r}"')
        img = self.img_db[k]
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img
    def _load_depth(self, data: mc.Item) -> ty.A:
        'Load ground-truth depth from dataset.'
        k = f'{data.seq}/{data.stem}'
        # FIX: presence was previously checked against `img_db`, so a missing
        # depth entry would surface as a KeyError on `depth_db[k]` instead of
        # the intended FileNotFoundError. Check the DB we actually read from.
        if (k not in self.depth_db):
            raise FileNotFoundError(f'Could not find specified file "{k}"')
        depth = self.depth_db[k]
        if self.should_resize:
            depth = cv2.resize(depth, dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth
    def _load_K(self, data: mc.Item) -> ty.A:
        'Load camera intrinsics from dataset.'
        K = self.K_db[data.seq]
        if self.should_resize:
            K = geo.resize_K(K, self.shape, shape=self._load_shape(data))
        return K
    def _load_shape(self, data: mc.Item) -> ty.S[int]:
        'Load original image shape from dataset.'
        sh = self.shape_db[data.seq]
        # NOTE(review): stored shapes appear to be off by one (max index rather
        # than size); +1 recovers the true size — confirm against the shape DB.
        sh = [(i + 1) for i in sh]
        return sh
|
@register('mapfree')
class MapFreeRelocDataset(MdeBaseDataset):
    'MapFreeReloc dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (monocular) used to compute photometric consistency losses.\n - Pose: Camera extrinsic parameters.\n - K: Camera intrinsic parameters.\n\n See BaseDataset for additional added metadata.\n\n Batch:\n x: {\n imgs: (Tensor) (3, h, w) Augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Augmented support frames.\n supp_idxs: (Tensor) (n,) Indexes of the support frames relative to target.\n }\n\n y: {\n imgs: (Tensor) (3, h, w) Non-augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Non-augmented support frames.\n T: (Tensor) (4, 4) Camera extrinsics.\n K: (Tensor) (4, 4) Camera intrinsics.\n }\n\n m: {\n stem: (str) Path to loaded item.\n supp: (str) Support frame multiplier.\n }\n\n Parameters:\n :param mode: (str) Training mode to use. {train, val, test}\n\n Attributes:\n :attr mode: (str) Training mode used.\n :attr split_file: (Path) File containing the list of items in the loaded split.\n :attr items_data: (list[mfr.Item]) List of dataset items as (scene, seq, stem).\n '
    VALID_DATUM = 'image support depth pose K'
    SHAPE = (720, 540)
    def __init__(self, mode: str, depth_src: str='dptkitti', **kwargs):
        super().__init__(**kwargs)
        self.mode = mode
        # Source network used to generate the (pseudo) ground-truth depths.
        self.depth_src = depth_src
        (self.split_file, self.items_data) = self.parse_items()
    def log_args(self):
        'Log dataset configuration.'
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()
    def validate_args(self) -> None:
        'Error checking for provided dataset configuration.'
        super().validate_args()
        if (('depth' in self.datum) and (self.mode == 'train')):
            raise ValueError('Depth maps are not available for the MapFreeReloc training split.')
        if (self.supp_idxs and (0 in self.supp_idxs)):
            raise ValueError('Stereo support frames are not provided by MapFreeReloc.')
    def parse_items(self) -> tuple[(Path, ty.S[mfr.Item])]:
        'Helper to parse dataset items.'
        file = mfr.Item.get_split_file(self.mode)
        data = mfr.Item.load_split(self.mode)
        return (file, data)
    def get_supp_scale(self, data: mfr.Item) -> int:
        'Generate the index of the support frame relative to the target image.'
        if (not self.randomize_supp):
            return 1
        k = random.randint(1, 5)
        return k
    def add_metadata(self, data: mfr.Item, batch: ty.BatchData) -> ty.BatchData:
        'Add item metadata.'
        batch[2]['mode'] = self.mode
        batch[2]['scene'] = data.scene
        batch[2]['seq'] = data.seq
        batch[2]['stem'] = data.stem
        return batch
    def load_pose(self, data: mfr.Item, batch: ty.BatchData) -> ty.BatchData:
        'Load camera pose and store in loss targets. (y)'
        batch[1]['T'] = self._load_pose(data)
        return batch
    def _load_image(self, data: mfr.Item, offset: int=0) -> Image:
        'Load target image from dataset. Offset should be used when loading support frames.'
        if offset:
            # Stems follow the 'frame_00000' convention; offset the numeric part.
            stem = (int(data.stem.split('_')[1]) + offset)
            data = mfr.Item(self.mode, data.scene, data.seq, f'frame_{stem:05d}')
        if (not data.get_img_file().is_file()):
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{data.scene}/{data.seq}/{data.stem}" with "offset={offset!r}"')
        img = data.load_img()
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img
    def _load_depth(self, data: mfr.Item) -> ty.A:
        'Load (pseudo) ground-truth depth from dataset.'
        depth = data.load_depth(self.depth_src)
        if self.should_resize:
            # FIX: the resized array was previously discarded (`skit.resize` was
            # called without assigning the result back to `depth`).
            depth = skit.resize(depth, self.SHAPE, order=0, preserve_range=True, mode='constant')
        return depth
    def _load_pose(self, data: mfr.Item) -> ty.A:
        'Load camera extrinsics from dataset.'
        return data.load_pose()
    def _load_K(self, data: mfr.Item) -> ty.A:
        'Load camera intrinsics from dataset, rescaled to the configured shape.'
        K = data.load_intrinsics()
        if self.should_resize:
            K = geo.resize_K(K, self.shape, self.SHAPE)
        return K
    def _load_stereo_image(self, data: mfr.Item) -> Image:
        raise NotImplementedError('MapFreeReloc does not provide stereo images.')
    def _load_stereo_T(self, data: mfr.Item) -> ty.A:
        raise NotImplementedError('MapFreeReloc does not provide stereo images.')
|
@register('nyud')
class NyudDataset(MdeBaseDataset):
    'NYUD-v2 testing dataset, providing target images and ground-truth depths.'
    VALID_DATUM = 'image depth'
    SHAPE = (480, 640)

    def __init__(self, mode: str, datum: ty.U[(str, ty.S[str])]='image depth', **kwargs):
        super().__init__(datum=datum, **kwargs)
        self.mode = mode
        self.split_file, self.items_data = self.parse_items()

    def log_args(self):
        'Log dataset configuration.'
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        'Error checking for provided dataset configuration.'
        # Temporarily whitelist 'support' so the base-class check defers to the
        # more specific NYUD error messages raised below.
        self.VALID_DATUM.add('support')
        super().validate_args()
        self.VALID_DATUM.remove('support')
        if self.use_aug:
            raise ValueError('NYUD-v2 is a testing dataset, no augmentations should be applied.')
        if self.supp_idxs or self.randomize_supp:
            raise ValueError('NYUD-v2 does not provide support frames.')

    def parse_items(self) -> tuple[(Path, ty.S[nyud.Item])]:
        'Helper to parse dataset items.'
        return nyud.Item.get_split_file(self.mode), nyud.Item.load_split(self.mode)

    def _load_image(self, data: nyud.Item, offset: int=0) -> Image:
        'Load the target image, resized to the configured size if required.'
        image = data.load_img()
        return image.resize(self.size, resample=Image.Resampling.BILINEAR) if self.should_resize else image

    def _load_depth(self, data: nyud.Item) -> ty.A:
        'Load the ground-truth depth map.'
        return data.load_depth()

    def get_supp_scale(self, data: nyud.Item) -> ty.A:
        raise NotImplementedError('NYUD-v2 does not provide support frames.')

    def _load_K(self, data: nyud.Item) -> ty.A:
        raise NotImplementedError('NYUD-v2 does not provide camera intrinsics.')

    def _load_stereo_image(self, data: nyud.Item) -> ty.A:
        raise NotImplementedError('NYUD-v2 does not provide stereo frames.')

    def _load_stereo_T(self, data: nyud.Item) -> ty.A:
        raise NotImplementedError('NYUD-v2 does not provide stereo frames.')
|
@register('sintel')
class SintelDataset(MdeBaseDataset):
    'Sintel testing dataset, providing target images, synthetic ground-truth depths and intrinsics.'
    VALID_DATUM = 'image depth K'
    SHAPE = (436, 1024)

    def __init__(self, mode: str, datum: ty.U[(str, ty.S[str])]='image depth K', **kwargs):
        super().__init__(datum=datum, **kwargs)
        self.mode = mode
        self.split_file, self.items_data = self.parse_items()

    def log_args(self):
        'Log dataset configuration.'
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        'Error checking for provided dataset configuration.'
        # Temporarily whitelist 'support' so the base-class check defers to the
        # more specific Sintel error messages raised below.
        self.VALID_DATUM.add('support')
        super().validate_args()
        self.VALID_DATUM.remove('support')
        if self.use_aug:
            raise ValueError('Sintel is a testing dataset, no augmentations should be applied.')
        if self.supp_idxs or self.randomize_supp:
            raise ValueError('Sintel does not provide support frames.')

    def parse_items(self) -> tuple[(Path, ty.S[si.Item])]:
        'Helper to parse dataset items.'
        return si.Item.get_split_file(self.mode), si.Item.load_split(self.mode)

    def _load_image(self, data: si.Item, offset: int=0) -> Image:
        'Load the target image, resized to the configured size if required.'
        image = data.load_img()
        return image.resize(self.size, resample=Image.Resampling.BILINEAR) if self.should_resize else image

    def _load_depth(self, data: si.Item) -> ty.A:
        'Load the ground-truth synthetic depth map.'
        return data.load_depth()

    def _load_K(self, data: si.Item) -> ty.A:
        'Load camera intrinsics, rescaled to the configured shape if required.'
        K = data.load_intrinsics()
        return geo.resize_K(K, self.shape, self.SHAPE) if self.should_resize else K

    def get_supp_scale(self, data: si.Item) -> ty.A:
        raise NotImplementedError('Sintel does not provide support frames.')

    def _load_stereo_image(self, data: si.Item) -> ty.A:
        raise NotImplementedError('Sintel does not provide stereo frames.')

    def _load_stereo_T(self, data: si.Item) -> ty.A:
        raise NotImplementedError('Sintel does not provide stereo frames.')
|
@register('slow_tv')
class SlowTvDataset(MdeBaseDataset):
    'SlowTV dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (monocular) used to compute photometric consistency losses.\n - K: Camera intrinsic parameters.\n\n See BaseDataset for additional added metadata.\n\n Batch:\n x: {\n imgs: (Tensor) (3, h, w) Augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Augmented support frames.\n supp_idxs: (Tensor) (n,) Indexes of the support frames relative to target.\n }\n\n y: {\n imgs: (Tensor) (3, h, w) Non-augmented target image.\n supp_imgs: (Tensor) (n, 3, h, w) Augmented support frames.\n K: (Tensor) (4, 4) Camera intrinsics.\n }\n\n m: {\n supp: (str) Support frame multiplier.\n }\n\n Parameters:\n :param split: (str) SlowTV split to use. {all, natural, driving, underwater}\n :param mode: (str) Training mode to use. {train, val}\n\n Attributes:\n :attr split_file: (Path) File containing the list of items in the loaded split.\n :attr items_data: (list[stv.Item]) List of dataset items as (seq, stem).\n :attr cats: (dict[str, str]) Dict containing the category of each sequence.\n '
    VALID_DATUM = 'image support K'
    SHAPE = (720, 1280)
    def __init__(self, split: str, mode: str, **kwargs):
        super().__init__(**kwargs)
        self.split = split
        self.mode = mode
        (self.split_file, self.items_data) = self.parse_items()
        self.cats = self.parse_cats()
        # Maximum randomized support-frame offset per video category
        # (presumably smaller for 'driving' due to faster camera motion — confirm).
        self._max_offset_per_cat = {'natural': 5, 'driving': 1, 'underwater': 5}
    def log_args(self):
        'Log dataset configuration.'
        self.logger.info(f"Split: '{self.split}' - Mode: '{self.mode}'")
        super().log_args()
    def validate_args(self) -> None:
        'Error checking for provided dataset configuration.'
        super().validate_args()
        if (0 in self.supp_idxs):
            raise ValueError('SlowTV does not provide stereo pairs.')
    def parse_items(self) -> tuple[(Path, ty.S[stv.Item])]:
        'Helper to parse dataset items.'
        (file, items) = stv.load_split(self.mode, self.split)
        return (file, items)
    def parse_cats(self) -> dict[(str, str)]:
        'Helper to load the category for each sequence.'
        return {seq: c for (seq, c) in zip(stv.get_seqs(), stv.load_categories(subcats=False))}
    def _load_image(self, data: stv.Item, offset: int=0) -> Image:
        'Load target image from dataset. Offset should be used when loading support frames.'
        file = stv.get_img_file(seq=data.seq, stem=(int(data.stem) + offset))
        if (not file.is_file()):
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{file}" with "offset={offset!r}"')
        img = Image.open(file)
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img
    def get_supp_scale(self, data: stv.Item) -> int:
        'Generate the index of the support frame relative to the target image.'
        if (not self.randomize_supp):
            return 1
        cat = self.cats[data.seq]
        k = random.randint(1, self._max_offset_per_cat[cat])
        return k
    def _load_K(self, data: stv.Item) -> ty.A:
        'Load camera intrinsics from dataset.'
        # FIX: access the sequence via the named field instead of positionally
        # (`data[0]`), matching the rest of the class and the docstring's
        # (seq, stem) item layout.
        K = stv.load_intrinsics(data.seq)
        if self.should_resize:
            K = geo.resize_K(K, self.shape, self.SHAPE)
        return K
    def _load_stereo_image(self, data: stv.Item) -> None:
        raise NotImplementedError('SlowTV does not contain stereo pairs.')
    def _load_stereo_T(self, data: stv.Item) -> None:
        raise NotImplementedError('SlowTV does not contain stereo pairs.')
    def _load_depth(self, data: stv.Item) -> None:
        raise NotImplementedError('SlowTV does not contain ground-truth depth.')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.