code stringlengths 17 6.64M |
|---|
@register('slow_tv_lmdb')
class SlowTvLmdbDataset(SlowTvDataset):
    """SlowTV dataset using LMDBs. See `SlowTvDataset` for additional details."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.image_dbs = {}  # One image LMDB per sequence, filled by `preload`.
        self.calib_db = stv.load_calibs()
        self.preload()

    def parse_items(self) -> tuple[Path, ty.S[stv.Item]]:
        """Helper to parse dataset items."""
        return stv.load_split(self.mode, self.split)

    def parse_cats(self) -> dict[str, str]:
        """Helper to load the category for each sequence."""
        cats = stv.load_categories(subcats=False)
        return dict(zip(stv.get_seqs(), cats))

    def preload(self) -> None:
        """Create all LMDBs for the required items."""
        for seq in {item.seq for item in self.items_data}:
            self.image_dbs[seq] = stv.load_imgs(seq)

    def _load_image(self, data: stv.Item, offset: int = 0) -> Image:
        """Load target image from dataset. Offset should be used when loading support frames."""
        kdb = data.seq
        k = f'{int(data.stem) + offset:010}'  # Zero-padded frame key within the LMDB.
        db = self.image_dbs[kdb]
        if k not in db:
            # Missing support frames raise a dedicated error so callers can skip them.
            exc = FileNotFoundError if offset == 0 else ty.SuppImageNotFoundError
            raise exc(f'Could not find specified file "{kdb}/{k}" with "offset={offset!r}"')
        img = db[k]
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img

    def _load_K(self, data: stv.Item) -> ty.A:
        """Load camera intrinsics from dataset."""
        K = self.calib_db[data.seq]
        if self.should_resize:
            K = geo.resize_K(K, self.shape, self.SHAPE)
        return K
|
@register('syns_patches')
class SynsPatchesDataset(MdeBaseDataset):
    """SYNS-Patches dataset.

    Datum:
    - Image: Target image from which to predict depth.
    - Depth: Target ground-truth depth.
    - Edge: Target ground-truth depth boundaries.
    - K: Camera intrinsic parameters.

    See BaseDataset for additional added metadata.

    Batch:
    x: {
        imgs: (Tensor) (3, h, w) Augmented target image.
    }

    y: {
        imgs: (Tensor) (3, h, w) Non-augmented target image.
        depth: (Tensor) (1, h, w) Ground-truth target depth.
        edges: (Tensor) (1, h, w) Ground-truth target depth boundaries.
        K: (Tensor) (4, 4) Camera intrinsics.
    }

    m: {
        cat: (str) Scene category.
        subcat: (str) Scene sub-category.
        supp: (str) Support frame multiplier.
    }

    Parameters:
    :param mode: (str) Training mode to use. {val, test}
    :param downsample_gt: (bool) If `True` downsample the ground-truth depth to match the image resolution.

    Attributes:
    :attr split_file: (Path) File containing the list of items in the loaded split.
    :attr items_data: (list[syp.Item]) List of dataset items as (seq, cam, stem).
    """
    VALID_DATUM = 'image depth edge K'
    SHAPE = (376, 1242)

    def __init__(self, mode: str, datum: ty.S[str] = 'image depth edge K', **kwargs):
        super().__init__(datum=datum, **kwargs)
        self.mode = mode
        self.split_file, self.items_data = self.parse_items()

    def log_args(self):
        """Log the dataset mode before the base-class arguments."""
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        """Error checking for provided dataset configuration."""
        # NOTE(review): this relies on the base class exposing `VALID_DATUM` as a
        # set-like object (add/remove); the raw class attribute here is a str — confirm
        # `MdeBaseDataset` performs that conversion.
        self.VALID_DATUM.add('support')
        super().validate_args()
        self.VALID_DATUM.remove('support')
        if self.use_aug:
            raise ValueError('SYNS-Patches is a testing dataset, no augmentations should be applied.')
        if self.supp_idxs:
            raise ValueError('SYNS-Patches does not provide support frames.')
        if self.randomize_supp:
            raise ValueError('SYNS-Patches does not provide support frames.')

    def parse_items(self):
        """Helper to parse dataset items."""
        return syp.load_split(self.mode)

    def add_metadata(self, data: syp.Item, batch: ty.BatchData) -> ty.BatchData:
        """Add item metadata (scene category & subcategory)."""
        m = batch[2]
        m['cat'], m['subcat'] = syp.load_category(data[0])
        return batch

    def _load_image(self, data: syp.Item, offset: int = 0) -> Image:
        """Load target image from dataset. Offset should be used when loading support frames."""
        file = syp.get_image_file(data.seq, data.stem)
        img = Image.open(file)
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img

    def _load_depth(self, data: syp.Item) -> ty.A:
        """Load ground-truth benchmark depth from dataset (corrected LiDAR).

        FIX: return annotation was `-> None`, but an ndarray is returned.
        """
        file = syp.get_depth_file(data.seq, data.stem)
        depth = np.load(file).astype(np.float32)
        if self.should_resize:
            # order=0 (nearest) avoids interpolating ground-truth depth values.
            depth = skit.resize(depth, self.shape, order=0, preserve_range=True, mode='constant')
        return depth

    def load_edge(self, data: syp.Item, batch: ty.BatchData) -> ty.BatchData:
        """Load ground-truth depth boundaries and add to loss targets. (y)"""
        edges = self._load_edge(data)
        batch[1]['edges'] = io.pil2np(edges)[..., None].astype(bool)
        return batch

    def _load_edge(self, data: syp.Item) -> Image:
        """Load ground-truth depth boundaries from dataset."""
        file = syp.get_edges_file(data.seq, 'edges', data.stem)
        edge = Image.open(file)
        if self.should_resize:
            # NEAREST keeps the boundary map binary after resizing.
            edge = edge.resize(self.size, resample=Image.Resampling.NEAREST)
        return edge

    def _load_K(self, data: syp.Item) -> ty.A:
        """Load camera intrinsics from dataset."""
        K = syp.load_intrinsics()
        if self.should_resize:
            K = geo.resize_K(K, self.shape, self.SHAPE)
        return K

    def create_axs(self) -> ty.Axes:
        """Create axes required for displaying."""
        _, axs = plt.subplots(1 + self.has('depth') + self.has('edge'))
        # FIX: was `isinstance(axs, plt.ty.Axes)` — `matplotlib.pyplot` has no `ty`
        # attribute, which would raise AttributeError. A single-subplot figure returns
        # a bare `plt.Axes`; wrap it so indexing is uniform.
        if isinstance(axs, plt.Axes):
            axs = np.array([axs])
        plt.tight_layout()
        return axs

    def show(self, batch: ty.BatchData, axs: ty.Axes) -> None:
        """Show a single dataset item."""
        super().show(batch, axs)
        y = batch[1]
        if 'edges' in y:
            axs[-1].imshow(y['edges'])

    def get_supp_scale(self, data: syp.Item) -> int:
        raise NotImplementedError('SYNS-Patches does not contain support frames.')

    def _load_stereo_image(self, data: syp.Item) -> None:
        raise NotImplementedError('SYNS-Patches does not contain stereo pairs.')

    def _load_stereo_T(self, data: syp.Item) -> None:
        raise NotImplementedError('SYNS-Patches does not contain stereo pairs.')

    def load_support(self, data: syp.Item, batch: ty.BatchData) -> ty.BatchData:
        raise NotImplementedError('SYNS-Patches does not contain support frames.')
|
@register('tum')
class TumDataset(MdeBaseDataset):
    """TUM-RGBD evaluation dataset. See `MdeBaseDataset` for additional details."""
    VALID_DATUM = 'image depth'
    SHAPE = (480, 640)

    def __init__(self, mode: str, datum='image depth', **kwargs):
        super().__init__(datum=datum, **kwargs)
        self.mode = mode
        self.split_file, self.items_data = self.parse_items()

    def log_args(self):
        """Log the dataset mode before the base-class arguments."""
        self.logger.info(f"Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        """Error checking for provided dataset configuration."""
        self.VALID_DATUM.add('support')
        super().validate_args()
        self.VALID_DATUM.remove('support')
        if self.use_aug:
            raise ValueError('TUM-RGBD is a testing dataset, no augmentations should be applied.')
        if self.supp_idxs:
            raise ValueError('TUM-RGBD does not provide support frames.')
        if self.randomize_supp:
            raise ValueError('TUM-RGBD does not provide support frames.')

    def parse_items(self) -> tuple[Path, ty.S[tum.Item]]:
        """Load the split file and its parsed items."""
        return tum.Item.get_split_file(self.mode), tum.Item.load_split(self.mode)

    def _load_image(self, data: tum.Item, offset: int = 0) -> Image:
        """Load target image, resized to the configured size if required."""
        img = data.load_img()
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img

    def _load_depth(self, data: tum.Item) -> ty.A:
        """Load the Kinect ground-truth depth map."""
        return data.load_depth()

    def get_supp_scale(self, data: tum.Item) -> ty.A:
        raise NotImplementedError('TUM-RGBD does not provide support frames.')

    def _load_K(self, data: tum.Item) -> ty.A:
        raise NotImplementedError('TUM-RGBD does not provide camera intrinsics.')

    def _load_stereo_image(self, data: tum.Item) -> ty.A:
        raise NotImplementedError('TUM-RGBD does not provide stereo frames.')

    def _load_stereo_T(self, data: tum.Item) -> ty.A:
        raise NotImplementedError('TUM-RGBD does not provide stereo frames.')
|
def get_json_file() -> Path:
    """Path to the official DDAD config file."""
    root = PATHS['ddad'] / 'ddad_train_val'
    return root / 'ddad.json'
|
def get_dataset(mode: str, datum: ty.S[str]) -> SynchronizedSceneDataset:
    """Get the official DDAD dataset for the target split.

    :param mode: (str) Dataset split to load. {train, val}
    :param datum: (list[str]) DDAD data types to load. {camera_0[1-5], lidar}
    :return: (SynchronizedSceneDataset) DDAD dataset.
    """
    if mode not in {'train', 'val'}:
        raise ValueError(f"DDAD provides only train and val splits. Got '{mode}'.")
    # Only generate GT depth when the lidar datum was actually requested.
    depth_datum = 'lidar' if 'lidar' in datum else None
    return SynchronizedSceneDataset(
        str(get_json_file()),
        datum_names=datum,
        generate_depth_from_datum=depth_datum,
        split=mode,
        use_diskcache=False,
    )
|
@dataclass
class Item:
    """Class to load items from DIODE dataset."""
    mode: str
    split: str
    scene: str
    scan: str
    stem: str

    @classmethod
    def get_split_file(cls, mode: str, split: str) -> Path:
        """Get path to split file based on mode {train, val} and scene type {indoors, outdoor}."""
        return PATHS['diode'] / 'data_list' / f'{mode}_{split}.csv'

    @classmethod
    def load_split(cls, mode: str, split: str) -> list['Item']:
        """Load split items based on mode {train, val} and scene type {indoors, outdoor}."""
        items = []
        for line in io.readlines(cls.get_split_file(mode, split)):
            # Each CSV row starts with a relative file path encoding the item fields.
            f = Path(line.split(',')[0])
            p = f.parts
            items.append(cls(mode=p[-5], split=p[-4], scene=p[-3], scan=p[-2], stem=f.stem))
        return items

    def _scan_dir(self) -> Path:
        """Directory holding this item's image/depth/mask files."""
        return PATHS['diode'] / self.mode / self.split / self.scene / self.scan

    def get_img_file(self) -> Path:
        """Get path to item image file."""
        return self._scan_dir() / f'{self.stem}.png'

    def get_depth_file(self) -> Path:
        """Get path to item LiDAR depth file."""
        return self._scan_dir() / f'{self.stem}_depth.npy'

    def get_mask_file(self) -> Path:
        """Get path to item valid LiDAR mask file."""
        return self._scan_dir() / f'{self.stem}_depth_mask.npy'

    def load_img(self) -> Image:
        """Load image."""
        return Image.open(self.get_img_file())

    def load_depth(self) -> ty.A:
        """Load LiDAR depth."""
        return np.load(self.get_depth_file()).astype(np.float32)

    def load_mask(self) -> ty.A:
        """Load valid LiDAR mask."""
        return np.load(self.get_mask_file()).astype(bool)
|
def get_split_file(mode: str) -> Path:
    """Get the split filename for the specified `mode`."""
    splits_dir = PATHS['mannequin'] / 'splits'
    return splits_dir / f'{mode}_files.txt'
|
def get_info_file(mode: str, seq: str) -> Path:
    """Get info filename with calibration and poses based on the mode and sequence."""
    seq_dir = PATHS['mannequin'] / mode / seq
    return seq_dir / 'calibration.txt'
|
def get_img_file(mode: str, seq: str, stem: ty.U[str, int]) -> Path:
    """Get image filename based on the mode, sequence and item number."""
    seq_dir = PATHS['mannequin'] / mode / seq
    return seq_dir / f'{int(stem):05}.jpg'
|
def get_depth_file(mode: str, seq: str, stem: ty.U[str, int]) -> Path:
    """Get depth filename based on the mode, sequence and item number."""
    seq_dir = PATHS['mannequin'] / mode / seq
    return seq_dir / f'{int(stem):05}.npy'
|
def load_split(mode: str) -> tuple[Path, ty.S[Item]]:
    """Load items (as [seq, stem]) in the specified split."""
    file = get_split_file(mode)
    rows = io.readlines(file, split=True)
    return file, io.tmap(Item, rows, star=True)
|
def load_info(mode: str, seq: str) -> dict[str, dict[str, ty.A]]:
    """Load image shape, intrinsics and poses for each image in sequence based on the mode and sequence.

    The calibration file holds a header line (n_imgs, offset) followed by six lines
    per image: crop bbox, intrinsics (fx fy cx cy), three rotation rows and a translation.
    """
    lines = io.readlines(get_info_file(mode, seq), split=True)
    n_imgs, offset = map(int, lines[0])
    body = lines[1:]
    assert len(body) == n_imgs * 6
    reader = iter(body)
    items = {}
    for i in range(n_imgs):
        y_min, y_max, x_min, x_max = map(int, next(reader))
        info = {'shape': (len(range(y_min, y_max)), len(range(x_min, x_max)))}
        fx, fy, cx, cy = map(float, next(reader))
        info['K'] = np.array([[fx, 0, cx, 0], [0, fy, cy, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32)
        R = np.array([io.tmap(float, next(reader)) for _ in range(3)], dtype=np.float32)
        t = np.array(io.tmap(float, next(reader)), dtype=np.float32)
        info['T'] = T_from_Rt(R, t)
        items[f'{i + offset:05d}'] = info
    assert next(reader, None) is None  # All lines consumed.
    return items
|
def create_split(max=1000, seed=42):
    """Create the mannequin test split from a random subset of available depth files.

    NOTE: the `max` parameter name shadows the builtin; kept for keyword compatibility.
    """
    mode = 'test'
    root = PATHS['mannequin'] / mode
    depth_files = [
        f
        for d in io.get_dirs(root)
        for f in io.get_files(d, key=lambda f: f.suffix == '.npy')
    ]
    random.seed(seed)
    random.shuffle(depth_files)
    chosen = sorted(depth_files[:max])
    with open(get_split_file(mode), 'w') as fh:
        fh.writelines(f'{f.parent.stem} {f.stem}\n' for f in chosen)
|
def get_split_file(mode: str) -> Path:
    """Get the split filename for the specified `mode`."""
    splits_dir = PATHS['mannequin_lmdb'] / 'splits'
    return splits_dir / f'{mode}_files.txt'
|
def get_info_file(mode: str, seq: str) -> Path:
    """Get info filename with calibration and poses based on the mode and sequence."""
    seq_dir = PATHS['mannequin_lmdb'] / mode / seq
    return seq_dir / 'calibration.txt'
|
def get_imgs_path(mode: str) -> Path:
    """Get the image LMDB path for the given mode."""
    return PATHS['mannequin_lmdb'] / mode / 'images'
|
def get_depths_path(mode: str) -> Path:
    """Get the depth LMDB path for the given mode."""
    return PATHS['mannequin_lmdb'] / mode / 'depths'
|
def get_shapes_path(mode: str) -> Path:
    """Get the image-shape LMDB path for the given mode."""
    return PATHS['mannequin_lmdb'] / mode / 'shapes'
|
def get_intrinsics_path(mode: str) -> Path:
    """Get the intrinsics LMDB path for the given mode."""
    return PATHS['mannequin_lmdb'] / mode / 'intrinsics'
|
def get_poses_path(mode: str) -> Path:
    """Get the poses LMDB path for the given mode."""
    return PATHS['mannequin_lmdb'] / mode / 'poses'
|
def load_split(mode: str) -> tuple[Path, ty.S[Item]]:
    """Load items (as [seq, stem]) in the specified split."""
    file = get_split_file(mode)
    rows = io.readlines(file, split=True)
    return file, io.tmap(Item, rows, star=True)
|
def load_info(mode: str, seq: str) -> dict[str, dict[str, ty.A]]:
    """Load image shape, intrinsics and poses for each image in sequence based on the mode and sequence.

    Expects a header line (n_imgs, offset) then six lines per image:
    crop bbox, intrinsics (fx fy cx cy), three rotation rows and a translation.
    """
    lines = io.readlines(get_info_file(mode, seq), split=True)
    n_imgs, offset = map(int, lines[0])
    body = lines[1:]
    assert len(body) == n_imgs * 6
    items = {}
    for i in range(n_imgs):
        rec = body[i * 6:(i + 1) * 6]
        y_min, y_max, x_min, x_max = map(int, rec[0])
        fx, fy, cx, cy = map(float, rec[1])
        R = np.array([io.tmap(float, row) for row in rec[2:5]], dtype=np.float32)
        t = np.array(io.tmap(float, rec[5]), dtype=np.float32)
        items[f'{i + offset:05d}'] = {
            'shape': (len(range(y_min, y_max)), len(range(x_min, x_max))),
            'K': np.array([[fx, 0, cx, 0], [0, fy, cy, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32),
            'T': T_from_Rt(R, t),
        }
    return items
|
def load_imgs(mode: str) -> ImageDatabase:
    """Open the image LMDB for the given mode."""
    return ImageDatabase(get_imgs_path(mode))
|
def load_depths(mode: str) -> LabelDatabase:
    """Open the depth LMDB for the given mode."""
    return LabelDatabase(get_depths_path(mode))
|
def load_shapes(mode: str) -> LabelDatabase:
    """Open the image-shape LMDB for the given mode."""
    return LabelDatabase(get_shapes_path(mode))
|
def load_intrinsics(mode: str) -> LabelDatabase:
    """Open the intrinsics LMDB for the given mode."""
    return LabelDatabase(get_intrinsics_path(mode))
|
def load_poses(mode: str) -> LabelDatabase:
    """Open the poses LMDB for the given mode."""
    return LabelDatabase(get_poses_path(mode))
|
def create_split_file(mode: str = 'train') -> None:
    """Helper to create the files for each dataset split. {train, val, test}"""
    split_file = PATHS['mapfree'] / 'splits' / f'{mode}_files.txt'
    io.mkdirs(split_file.parent)
    files = sorted((PATHS['mapfree'] / mode).glob('./*/seq?/*.jpg'))
    with open(split_file, 'w') as fh:
        for f in files:
            # Each row: "<scene> <seq> <stem>".
            fh.write(f'{f.parent.parent.stem} {f.parent.stem} {f.stem}\n')
|
@dataclass
class Item:
    """Class to load items from MapFreeReloc dataset."""
    mode: str
    scene: str
    seq: str
    stem: str

    @classmethod
    def get_split_file(cls, mode: str) -> Path:
        """Get path to dataset split. {train, val, test}"""
        return PATHS['mapfree'] / 'splits' / f'{mode}_files.txt'

    @classmethod
    def load_split(cls, mode: str) -> ty.S['Item']:
        """Load dataset split. {train, val, test}"""
        rows = io.readlines(cls.get_split_file(mode), split=True)
        return [cls(mode, *row) for row in rows]

    def _scene_dir(self) -> Path:
        """Root directory for this item's scene."""
        return PATHS['mapfree'] / self.mode / self.scene

    def get_img_file(self) -> Path:
        """Get path to image file."""
        return self._scene_dir() / self.seq / f'{self.stem}.jpg'

    def get_depth_file(self, src) -> Path:
        """Get path to depth file."""
        return self._scene_dir() / self.seq / f'{self.stem}.{src}.png'

    def get_intrinsics_file(self) -> Path:
        """Get path to intrinsics file. One per scene."""
        return self._scene_dir() / 'intrinsics.txt'

    def get_poses_file(self) -> Path:
        """Get path to poses file. One per scene."""
        return self._scene_dir() / 'poses.txt'

    def load_img(self) -> Image:
        """Load image."""
        return Image.open(self.get_img_file())

    def load_depth(self, src: str) -> ty.A:
        """Load depth, encoded in mm"""
        raw = cv2.imread(str(self.get_depth_file(src)), cv2.IMREAD_UNCHANGED)
        return raw[..., None].astype(np.float32) / 1000

    def _find_row(self, file: Path) -> list:
        """Scan a per-scene txt file for the row matching this item's stem."""
        stem = f'{self.seq}/{self.stem}.jpg'
        return next(l for l in io.readlines(file, split=True) if l[0] == stem)

    def load_intrinsics(self) -> ty.A:
        """Load intrinsics.
        Intrinsics are given as a single file per scene. We scan the file for the matching stem and load it.
        Not the most efficient, but it matches the interface of other datasets.
        """
        row = self._find_row(self.get_intrinsics_file())
        vals = io.lmap(float, row[1:])
        K = np.zeros((4, 4), dtype=np.float32)
        # Row layout: fx fy cx cy [w h] — the trailing two values are dropped.
        K[0, 0], K[1, 1], K[0, 2], K[1, 2] = vals[:-2]
        K[2, 2] = K[3, 3] = 1
        return K

    def load_pose(self) -> ty.A:
        """Load poses.
        Poses are given as a single file per scene. They are represented as a quaternion (w, x, y, z) and translation
        (x, y, z). We scan the file for the matching stem and load it. Not the most efficient, but it matches the
        interface of other datasets.
        """
        row = self._find_row(self.get_poses_file())
        q = io.lmap(float, row[1:5])
        t = io.lmap(float, row[5:8])
        return geo.T_from_qt(np.array(q), np.array(t)).astype(np.float32)
|
@dataclass
class Item:
    """Class to load items from the NYU Depth V2 dataset."""
    mode: str
    stem: str

    @classmethod
    def get_split_file(cls, mode: str) -> Path:
        """Get path to dataset split. {train, test}."""
        return PATHS['nyud'] / 'splits' / f'{mode}_files.txt'

    @classmethod
    def load_split(cls, mode: str) -> ty.S['Item']:
        """Load dataset split. {train, test}"""
        stems = io.readlines(cls.get_split_file(mode))
        return [cls(mode, stem) for stem in stems]

    def get_img_file(self) -> Path:
        """Get path to image file."""
        return PATHS['nyud'] / self.mode / 'rgb' / f'{self.stem}.png'

    def get_depth_file(self) -> Path:
        """Get path to Kinect depth file."""
        return PATHS['nyud'] / self.mode / 'depth' / f'{self.stem}.npy'

    def load_img(self) -> Image:
        """Load image."""
        return Image.open(self.get_img_file())

    def load_depth(self) -> ty.A:
        """Load Kinect depth map."""
        return np.load(self.get_depth_file())
|
def create_splits() -> None:
    """Create train split based on all left camera files.

    Writes one "<seq> <stem>" row per `.cam` file found under `train/camdata_left`.
    """
    split_file = PATHS['sintel'] / 'splits' / 'train_files.txt'
    io.mkdirs(split_file.parent)
    files = sorted((PATHS['sintel'] / 'train' / 'camdata_left').glob('**/*.cam'))
    items = [f'{f.parent.stem} {f.stem}\n' for f in files]
    with open(split_file, 'w') as f:
        # FIX: previously used a list comprehension purely for its `f.write` side
        # effects; `writelines` expresses the intent without building a junk list.
        f.writelines(items)
|
@dataclass
class Item:
    """Class to load Sintel items. NOTE: We use the official TRAINING split as our TEST set."""
    mode: str
    seq: str
    stem: str

    @classmethod
    def get_split_file(cls, mode: str) -> Path:
        """Get path to dataset split. {train}"""
        return PATHS['sintel'] / 'splits' / f'{mode}_files.txt'

    @classmethod
    def load_split(cls, mode: str) -> ty.S['Item']:
        """Load dataset split. {train}"""
        rows = io.readlines(cls.get_split_file(mode), split=True)
        return [cls(mode, *row) for row in rows]

    def _file(self, subdir: str, suffix: str) -> Path:
        """Build the path to this item's file inside a dataset subdirectory."""
        return PATHS['sintel'] / self.mode / subdir / self.seq / f'{self.stem}{suffix}'

    def get_img_file(self) -> Path:
        """Get path to image file."""
        return self._file('final', '.png')

    def get_depth_file(self) -> Path:
        """Get path to synthetic depth file."""
        return self._file('depth', '.dpt')

    def get_cam_file(self) -> Path:
        """Get path to camera intrinsics/extrinsics file."""
        return self._file('camdata_left', '.cam')

    def load_img(self) -> Image:
        """Load image."""
        return Image.open(self.get_img_file())

    def load_depth(self) -> ty.A:
        """Load synthetic depth map. Adapted from the official devkit."""
        with open(self.get_depth_file(), 'rb') as fh:
            # File layout: float32 tag, int32 width, int32 height, float32 data.
            check = np.fromfile(fh, dtype=np.float32, count=1)[0]
            assert check == TAG_FLOAT, f'Wrong tag in depth file ({check} vs. {TAG_FLOAT}). Big-endian machine?'
            w = np.fromfile(fh, dtype=np.int32, count=1)[0]
            h = np.fromfile(fh, dtype=np.int32, count=1)[0]
            numel = w * h
            assert (w > 0) and (h > 0) and (numel > 1) and (numel < 100000000), f'Wrong input size (w={w!r}, h={h!r})'
            depth = np.fromfile(fh, dtype=np.float32, count=-1).reshape((h, w))[..., None]
        return depth

    def load_intrinsics(self) -> ty.A:
        """Load camera intrinsics. Adapted from the official devkit."""
        with open(self.get_cam_file(), 'rb') as fh:
            check = np.fromfile(fh, dtype=np.float32, count=1)[0]
            assert check == TAG_FLOAT, f'Wrong tag in cam file ({check} vs. {TAG_FLOAT}). Big-endian machine?'
            K = np.fromfile(fh, dtype='float64', count=9).reshape((3, 3)).astype(np.float32)
        return geo.pad_K(K)
|
def get_split_file(mode: str, split: str) -> Path:
    """Get the split filename for the specified `mode`."""
    splits_dir = PATHS['slow_tv_lmdb'] / 'splits' / f'{split}'
    return splits_dir / f'{mode}_files.txt'
|
def get_category_file() -> Path:
    """Get filename containing the list of scene categories (one per sequence)."""
    return PATHS['slow_tv_lmdb'] / 'splits' / 'categories.txt'
|
def get_seqs() -> tuple[str]:
    """Get tuple of sequences names in dataset."""
    skip = {'splits', 'videos', 'colmap'}  # Non-sequence bookkeeping directories.
    dirs = io.get_dirs(PATHS['slow_tv_lmdb'], key=lambda d: d.stem not in skip)
    return io.tmap(lambda d: d.stem, dirs)
|
def get_imgs_path(seq: str) -> Path:
    """Get the image LMDB path for the given sequence."""
    return PATHS['slow_tv_lmdb'] / seq
|
def get_calibs_path() -> Path:
    """Get the calibration LMDB path."""
    return PATHS['slow_tv_lmdb'] / 'calibs'
|
def load_categories(subcats: bool = True) -> list[str]:
    """Load list of categories per SlowTV scenes."""
    cats = [line.lower() for line in io.readlines(get_category_file())]
    if not subcats:
        # Categories are stored as "cat-subcat"; keep only the main category.
        cats = [c.split('-')[0] for c in cats]
    return cats
|
def load_split(mode: str, split: str) -> tuple[Path, ty.S[Item]]:
    """Load the split filename and items as (seq, stem)."""
    file = get_split_file(mode, split)
    rows = io.readlines(file, split=True)
    return file, io.tmap(Item, rows, star=True)
|
def load_imgs(seq: str) -> ImageDatabase:
    """Open the image LMDB for the given sequence."""
    return ImageDatabase(get_imgs_path(seq))
|
def load_calibs() -> LabelDatabase:
    """Open the calibration LMDB."""
    return LabelDatabase(get_calibs_path())
|
def get_split_file(mode: str) -> Path:
    """Get the split filename for the specified `mode`."""
    splits_dir = PATHS['syns_patches'] / 'splits'
    return splits_dir / f'{mode}_files.txt'
|
def get_scenes() -> list[Path]:
    """Get paths to each of the scenes."""
    root = PATHS['syns_patches']
    dirs = [p for p in root.iterdir() if p.is_dir() and p.stem != 'splits']
    return sorted(dirs)
|
def get_scene_files(scene_dir: Path) -> dict[str, ty.S[Path]]:
    """Get paths to all subdir files for a given scene."""
    files = {}
    for key in SUBDIRS:
        subdir = scene_dir / key
        if subdir.is_dir():
            files[key] = sorted(subdir.iterdir())
    return files
|
def get_info_file(scene: str) -> Path:
    """Get scene information file based on the scene number.

    Raises StopIteration if the scene directory contains no `.txt` file.
    """
    scene_dir = PATHS['syns_patches'] / scene
    return next(f for f in scene_dir.iterdir() if f.suffix == '.txt')
|
def get_image_file(scene: str, file: str) -> Path:
    """Get image filename based on scene and item number."""
    return PATHS['syns_patches'] / scene / 'images' / file
|
def get_depth_file(scene: str, file: str) -> Path:
    """Get depth filename based on scene and item number."""
    depth_file = PATHS['syns_patches'] / scene / 'depths' / file
    return depth_file.with_suffix('.npy')
|
def get_edges_file(scene: str, subdir: str, file: str) -> Path:
    """Get edges filename based on scene, edges subdirectory and item number."""
    assert 'edges' in subdir, f'Must provide an "edges" directory. ({subdir})'
    assert subdir in SUBDIRS, f"Non-existent edges directory. ({subdir} vs. {[s for s in SUBDIRS if ('edges' in s)]})"
    return PATHS['syns_patches'] / scene / subdir / file
|
def load_info(scene: str) -> ty.S[str]:
    """Load the scene information."""
    # Info files are latin-1 encoded.
    return io.readlines(get_info_file(scene), encoding='latin-1')
|
def load_category(scene: str) -> tuple[str, str]:
    """Load the scene category and subcategory."""
    header = load_info(scene)[1].replace('Scene Category: ', '')
    # Info files use either "cat: subcat" or "cat - subcat" as separators.
    try:
        cat, subcat = header.split(': ')
    except ValueError:
        cat, subcat = header.split(' - ')
    return cat, subcat
|
def load_split(mode) -> tuple[Path, ty.S[Item]]:
    """Load the list of scenes and filenames that are part of the test split.

    Test split file is given as "SEQ ITEM":
    ```
    01 00.png
    10 11.png
    ```
    """
    file = get_split_file(mode)
    rows = io.readlines(file, split=True)
    return file, io.tmap(Item, rows, star=True)
|
def load_intrinsics() -> ty.A:
    """Computes the virtual camera intrinsics for the `Kitti` based SYNS Patches.
    We compute this based on the desired FOV, using basic trigonometry.

    :return: (ndarray) (3, 3) Camera intrinsic parameters.
    """
    # FIX(doc): the docstring previously claimed a (4, 4) matrix, but a (3, 3) K is
    # built here. NOTE(review): other loaders in this codebase return padded (4, 4)
    # intrinsics — confirm whether callers apply `geo.pad_K`/resize downstream.
    Fy, Fx = KITTI_FOV
    h, w = KITTI_SHAPE
    cx, cy = w // 2, h // 2
    # Principal point at the image centre; focal length from FOV: f = c / tan(FOV / 2).
    fx = cx / np.tan(np.deg2rad(Fx) / 2)
    fy = cy / np.tan(np.deg2rad(Fy) / 2)
    return np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32)
|
@dataclass
class Item:
    """Class to load items from TUM-RGBD dataset."""
    seq: str
    rgb_stem: str
    depth_stem: str

    @classmethod
    def get_split_file(cls, mode: str) -> Path:
        """Get path to dataset split. {test}"""
        return PATHS['tum'] / 'splits' / f'{mode}_files.txt'

    @classmethod
    def load_split(cls, mode: str) -> ty.S['Item']:
        """Load dataset split. {test}"""
        rows = io.readlines(cls.get_split_file(mode), split=True)
        return [cls(*row) for row in rows]

    def get_img_file(self) -> Path:
        """Get path to image file."""
        return PATHS['tum'] / self.seq / self.rgb_stem

    def get_depth_file(self) -> Path:
        """Get path to Kinect depth file."""
        return PATHS['tum'] / self.seq / self.depth_stem

    def load_img(self) -> Image:
        """Load image."""
        return Image.open(self.get_img_file())

    def load_depth(self) -> ty.A:
        """Load Kinect depth map.

        Stored depth is divided by 5000 (presumably the TUM metric scale factor —
        yields metres; confirm against the benchmark docs) and given a channel axis.
        """
        depth = np.array(Image.open(self.get_depth_file()), dtype=np.float32) / 5000
        return depth[..., None]
|
def create_splits(th: float = 0.02, max: int = 2500, seed: int = 42) -> None:
    """Create a split of associated images & depth maps.

    :param th: (float) Maximum time difference between two images to be considered as associated.
    :param max: (int) Maximum number of images in split. (Name shadows the builtin; kept for keyword compatibility.)
    :param seed: (int) Random seed.
    :return:
    """
    split_file = PATHS['tum'] / 'splits' / 'test_files.txt'
    io.mkdirs(split_file.parent)
    items = []
    for seq in io.get_dirs(PATHS['tum'], key=lambda f: f.stem != 'splits'):
        imgs = read_file_list(seq / 'rgb.txt')
        depths = read_file_list(seq / 'depth.txt')
        matches = associate(imgs, depths, offset=0, max_difference=th)
        if 'freiburg2' in seq.stem:
            matches = matches[::3]  # Keep only every third match for freiburg2 sequences.
        items += [f'{seq.stem} {imgs[a][0]} {depths[b][0]}\n' for a, b in matches]
    random.seed(seed)
    random.shuffle(items)
    with open(split_file, 'w') as f:
        f.writelines(sorted(items[:max]))
|
def read_file_list(filename):
    """Reads a trajectory from a text file. From: https://cvg.cit.tum.de/data/datasets/rgbd-dataset/tools

    File format:
    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
    and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp.

    Input:
    filename -- File name

    Output:
    dict -- dictionary of (stamp,data) tuples
    """
    with open(filename) as f:
        data = f.read()
    # Normalize separators, then drop blank lines and '#' comment lines.
    lines = data.replace(',', ' ').replace('\t', ' ').split('\n')
    rows = [
        [v.strip() for v in line.split(' ') if v.strip() != '']
        for line in lines
        if len(line) > 0 and line[0] != '#'
    ]
    # FIX: renamed the accumulator, which previously shadowed the builtin `list`.
    pairs = [(float(row[0]), row[1:]) for row in rows if len(row) > 1]
    return dict(pairs)
|
def associate(first_list, second_list, offset, max_difference):
    """Associate image and depth pairs. From: https://cvg.cit.tum.de/data/datasets/rgbd-dataset/tools

    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
    to find the closest match for every input tuple.

    Input:
    first_list -- first dictionary of (stamp,data) tuples
    second_list -- second dictionary of (stamp,data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
    """
    # FIX: use sets for the unmatched-key pools — the original used lists, making each
    # membership test and `remove` O(n) inside the greedy pass. The output is identical:
    # candidates are fully sorted before the pass and matches are re-sorted at the end.
    first_keys = set(first_list.keys())
    second_keys = set(second_list.keys())
    potential_matches = [
        (abs(a - (b + offset)), a, b)
        for a in first_keys
        for b in second_keys
        if abs(a - (b + offset)) < max_difference
    ]
    potential_matches.sort()
    matches = []
    # Greedily take the closest remaining pair; each stamp is matched at most once.
    for _, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches
|
class Database():
_database = None
_protocol = None
_length = None
def __init__(self, path: PathLike, readahead: bool=True, pre_open: bool=False):
'Base class for LMDB-backed _databases.\n\n :param path: (PathLike) Path to the database.\n :param readahead: (bool) If `True`, enables the filesystem readahead mechanism.\n :param pre_open: (bool) If `True`, the first iterations will be faster, but it will raise error when doing multi-gpu training.\n If `False`, the database will open when you will retrieve the first item.\n '
self.path = str(path)
self.readahead = readahead
self.pre_open = pre_open
self._has_fetched_an_item = False
@property
def database(self):
if (self._database is None):
self._database = lmdb.open(path=self.path, readonly=True, readahead=self.readahead, max_spare_txns=256, lock=False)
return self._database
@database.deleter
def database(self):
if (self._database is not None):
self._database.close()
self._database = None
@property
def protocol(self):
'Read the pickle protocol contained in the database.\n\n :return: The set of available keys.\n '
if (self._protocol is None):
self._protocol = self._get(item='protocol', convert_key=(lambda key: key.encode('ascii')), convert_value=(lambda value: pickle.loads(value)))
return self._protocol
@property
def keys(self):
'Read the keys contained in the database.\n\n :return: The set of available keys.\n '
protocol = self.protocol
keys = self._get(item='keys', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value)))
return keys
def __len__(self):
'Returns the number of keys available in the database.\n\n :return: The number of keys.\n '
if (self._length is None):
self._length = len(self.keys)
return self._length
def __getitem__(self, item):
'Retrieves an item or a list of items from the database.\n\n :param item: A key or a list of keys.\n :return: A value or a list of values.\n '
self._has_fetched_an_item = True
if (not isinstance(item, list)):
item = self._get(item, self._convert_key, self._convert_value)
else:
item = self._gets(item, self._convert_keys, self._convert_values)
return item
def __contains__(self, item):
'Check if a given key is in the database.'
return (item in self.keys)
def index(self, index):
'Retrieves an item or a list of items from the database from an integer index.\n\n :param index: An index or a list of indexes.\n :return: A value or a list of values.\n '
key = self.keys[index]
return (key, self[key])
def _get(self, item, convert_key, convert_value):
'Instantiates a transaction and its associated cursor to fetch an item.\n\n :param item: A key.\n :param convert_key:\n :param convert_value:\n :return:\n '
with self.database.begin() as txn:
with txn.cursor() as cursor:
item = self._fetch(cursor, item, convert_key, convert_value)
self._keep_database()
return item
def _gets(self, items, convert_keys, convert_values):
'Instantiates a transaction and its associated cursor to fetch a list of items.\n\n :param items: A list of keys.\n :param convert_keys:\n :param convert_values:\n :return:\n '
with self.database.begin() as txn:
with txn.cursor() as cursor:
items = self._fetchs(cursor, items, convert_keys, convert_values)
self._keep_database()
return items
def _fetch(self, cursor, key, convert_key, convert_value):
'Retrieve a value given a key.\n\n :param cursor:\n :param key: A key.\n :param convert_key:\n :param convert_value:\n :return: A value.\n '
key = convert_key(key=key)
value = cursor.get(key=key)
value = convert_value(value=value)
return value
def _fetchs(self, cursor, keys, convert_keys, convert_values):
'Retrieve a list of values given a list of keys.\n\n :param cursor:\n :param keys: A list of keys.\n :param convert_keys:\n :param convert_values:\n :return: A list of values.\n '
keys = convert_keys(keys=keys)
(_, values) = list(zip(*cursor.getmulti(keys)))
values = convert_values(values=values)
return values
def _convert_key(self, key):
'Converts a key into a byte key.\n\n :param key: A key.\n :return: A byte key.\n '
return pickle.dumps(key, protocol=self.protocol)
def _convert_keys(self, keys):
'Converts keys into byte keys.\n\n :param keys: A list of keys.\n :return: A list of byte keys.\n '
return [self._convert_key(key=key) for key in keys]
def _convert_value(self, value):
'Converts a byte value back into a value.\n\n :param value: A byte value.\n :return: A value\n '
return pickle.loads(value)
def _convert_values(self, values):
'Converts bytes values back into values.\n\n :param values: A list of byte values.\n :return: A list of values.\n '
return [self._convert_value(value=value) for value in values]
def _keep_database(self):
'Checks if the database must be deleted.'
if ((not self.pre_open) and (not self._has_fetched_an_item)):
del self.database
    def __iter__(self):
        """Iterate over the database keys (not the values)."""
        return iter(self.keys)
    def __del__(self):
        """Close the database properly on garbage collection."""
        # NOTE(review): if construction failed before `_database` existed,
        # the property deleter would raise here -- presumed safe because
        # `_database` appears to be a class-level default; confirm.
        del self.database
|
class ImageDatabase(Database):
    """Database whose byte values are decoded into PIL Images."""

    def _convert_value(self, value):
        """Convert a byte image back into a PIL Image.

        :param value: A byte image.
        :return: A PIL Image.
        """
        buffer = io.BytesIO(value)
        return Image.open(buffer)
|
class MaskDatabase(ImageDatabase):
    """Image database whose values are decoded as binary (mode '1') masks."""

    def _convert_value(self, value):
        """Convert a byte image back into a binarized PIL Image.

        :param value: A byte image.
        :return: A PIL image in mode '1'.
        """
        image = Image.open(io.BytesIO(value))
        return image.convert('1')
|
class LabelDatabase(Database):
    """Database of pickled labels; inherits `Database` behavior unchanged."""
    pass
|
class ArrayDatabase(Database):
    """Database whose values are fixed-shape numpy arrays stored as raw buffers.

    All entries share one dtype and shape, stored once as pickled metadata
    under the 'dtype' and 'shape' keys.
    """
    _dtype = None
    _shape = None

    @property
    def dtype(self):
        """Numpy dtype of the stored arrays (read once, then cached)."""
        if self._dtype is None:
            # Metadata entries are pickled, so decode with pickle rather than
            # the raw-buffer `_convert_value` override below.
            self._dtype = self._get(
                item='dtype',
                convert_key=self._convert_key,
                convert_value=lambda value: pickle.loads(value),
            )
        return self._dtype

    @property
    def shape(self):
        """Shape of each stored array (read once, then cached)."""
        if self._shape is None:
            self._shape = self._get(
                item='shape',
                convert_key=self._convert_key,
                convert_value=lambda value: pickle.loads(value),
            )
        return self._shape

    def _convert_value(self, value):
        """Reinterpret one raw byte buffer as a single array."""
        return np.frombuffer(value, dtype=self.dtype).reshape(self.shape)

    def _convert_values(self, values):
        """Decode a batch in one pass: join the buffers, add a batch axis."""
        flat = np.frombuffer(b''.join(values), dtype=self.dtype)
        return flat.reshape((len(values),) + self.shape)
|
class TensorDatabase(ArrayDatabase):
    """Array database whose values are returned as torch tensors.

    `torch.from_numpy` shares memory with the decoded numpy array, so no
    extra copy is made here.
    """

    def _convert_value(self, value):
        """Decode one raw buffer and wrap it as a torch.Tensor."""
        # Zero-argument super() (Py3 idiom) instead of the legacy
        # super(TensorDatabase, self) form used elsewhere in the file.
        return torch.from_numpy(super()._convert_value(value))

    def _convert_values(self, values):
        """Decode a batch of raw buffers and wrap as one torch.Tensor."""
        return torch.from_numpy(super()._convert_values(values))
|
def write_image_database(d: dict, database: Path):
    """Write a mapping {key: image file path} into an LMDB image database.

    An existing destination is removed first. Metadata ('protocol' and the
    sorted 'keys' list) is stored up front; then each image file's raw
    (already encoded) bytes are stored verbatim, one transaction per image.

    :param d: Mapping from key to the path of an encoded image file.
    :param database: Destination directory for the LMDB database.
    """
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)
    with lmdb.open(path=str(database), map_size=2 ** 40, writemap=True) as env:
        # The protocol entry uses a plain ASCII key so readers can bootstrap
        # without already knowing the pickle protocol.
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        for name, img_path in tqdm(sorted(d.items())):
            with env.begin(write=True) as txn:
                with img_path.open('rb') as file:
                    txn.put(key=pickle.dumps(name), value=file.read(), dupdata=False)
|
def write_label_database(d: dict, database: Path):
    """Write a mapping {key: picklable label} into an LMDB label database.

    The database is first built in a unique temporary directory and moved to
    `database` once complete, so readers never observe a half-written DB.
    An existing destination is removed first.

    :param d: Mapping from key to a picklable label value.
    :param database: Destination directory for the LMDB database.
    """
    import tempfile
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)
    # mkdtemp is collision-free and honors TMPDIR, unlike the previous
    # hand-rolled /tmp/TEMP_<time()> scheme.
    tmp_dir = Path(tempfile.mkdtemp(prefix='TEMP_'))
    tmp_database = tmp_dir / f'{database.name}'
    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env:
        # Protocol entry keyed with plain ASCII bytes so readers can bootstrap.
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        # All labels in one transaction: they are small pickled objects.
        with env.begin(write=True) as txn:
            for (key, value) in tqdm(sorted(d.items())):
                txn.put(key=pickle.dumps(key), value=pickle.dumps(value), dupdata=False)
    shutil.move(f'{tmp_database}', database)
    shutil.rmtree(tmp_dir)
|
def write_array_database(d: dict, database: Path):
    """Write a mapping {key: np.ndarray} into an LMDB array database.

    All arrays must share the same shape and dtype; these are stored once as
    pickled metadata. The array values themselves are stored as raw buffers
    (`ndarray.tobytes()`) so that `ArrayDatabase._convert_value` can decode
    them with `np.frombuffer` -- pickling the arrays here (as the code
    previously did) cannot round-trip through that reader.

    :param d: Mapping from key to an ndarray (identical shape/dtype each).
    :param database: Destination directory for the LMDB database.
    """
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)
    with lmdb.open(path=str(database), map_size=(2 ** 40)) as env:
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        # Shape/dtype are taken from an arbitrary entry; all entries are
        # assumed identical (required by ArrayDatabase).
        sample = next(iter(d.values()))
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('shape'), value=pickle.dumps(sample.shape), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('dtype'), value=pickle.dumps(sample.dtype), dupdata=False)
        with env.begin(write=True) as txn:
            for (key, value) in tqdm(sorted(d.items())):
                # Raw buffer, not pickle: matches ArrayDatabase's frombuffer reader.
                txn.put(key=pickle.dumps(key), value=value.tobytes(), dupdata=False)
|
class AgentSnapshot2DList(AgentSnapshotList):
'Container for 2D agent list.\n\n Parameters\n ----------\n ontology: BoundingBoxOntology\n Ontology for 2D bounding box tasks.\n \n TODO : Add support for BoundingBox2DAnnotationList.\n boxlist: list[BoundingBox2D]\n List of BoundingBox2D objects. See `utils/structures/bounding_box_2d`\n for more details.\n '
def __init__(self, ontology, boxlist):
super().__init__(ontology)
assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load AgentSnapshot2DList with wrong type of ontology!'
for box in boxlist:
assert isinstance(box, BoundingBox2D), f'Can only instantiate an agent snapshot list from a list of BoundingBox2D, not {type(box)}'
self.boxlist = boxlist
@classmethod
def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
'Loads agent snapshot list from proto into a canonical format for consumption in __getitem__ function in\n BaseDataset.\n Format/data structure for agent types will vary based on task.\n\n Parameters\n ----------\n agent_snapshots_pb2: dgp.proto.agent.AgentsSlice.agent_snapshots or dgp.proto.agent.AgentTrack.agent_snapshots\n A proto message holding list of agent snapshot.\n\n ontology: Ontology\n Ontology for given agent.\n\n feature_ontology_table: dict, optional\n A dictionary mapping feature type key(s) to Ontology(s), i.e.:\n {\n "agent_2d": AgentFeatureOntology[<ontology_sha>],\n "agent_3d": AgentFeatureOntology[<ontology_sha>]\n }\n Default: None.\n\n Returns\n -------\n AgentSnapshot2DList\n Agent Snapshot list object instantiated from proto object.\n '
boxlist = []
for agent_snapshot_2d in agent_snapshots_pb2:
feature_type = agent_snapshot_2d.agent_snapshot_2D.feature_type
feature_ontology = feature_ontology_table[FEATURE_TYPE_ID_TO_KEY[feature_type]]
boxlist.append(BoundingBox2D(box=np.float32([agent_snapshot_2d.agent_snapshot_2D.box.x, agent_snapshot_2d.agent_snapshot_2D.box.y, agent_snapshot_2d.agent_snapshot_2D.box.w, agent_snapshot_2d.agent_snapshot_2D.box.h]), class_id=ontology.class_id_to_contiguous_id[agent_snapshot_2d.agent_snapshots_2D.class_id], instance_id=agent_snapshot_2d.agent_snapshot_2D.instance_id, color=ontology.colormap[agent_snapshot_2d.agent_snapshot_2D.class_id], attributes=dict([(feature_ontology.id_to_name[feature_id], feature) for (feature_id, feature) in enumerate(agent_snapshot_2d.agent_snapshot_2D.features)])))
return cls(ontology, boxlist)
def __len__(self):
return len(self.boxlist)
def __getitem__(self, index):
'Return a single 3D bounding box'
return self.boxlist[index]
def render(self):
'TODO: Batch rendering function for bounding boxes.'
@property
def class_ids(self):
'Return class ID for each box, with ontology applied:\n 0 is background, class IDs mapped to a contiguous set.\n '
return np.int64([box.class_id for box in self.boxlist])
@property
def attributes(self):
'Return a list of dictionaries of attribute name to value.'
return [box.attributes for box in self.boxlist]
@property
def instance_ids(self):
return np.int64([box.instance_id for box in self.boxlist])
|
class AgentSnapshot3DList(AgentSnapshotList):
    'Container for 3D agent list.\n\n    Parameters\n    ----------\n    ontology: BoundingBoxOntology\n        Ontology for 3D bounding box tasks.\n\n    boxlist: list[BoundingBox3D]\n        List of BoundingBox3D objects. See `utils/structures/bounding_box_3d`\n        for more details.\n    '
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        # Fail fast on ontology/box type mismatches before storing the list.
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load AgentSnapshot3DList with wrong type of ontology!'
        for box in boxlist:
            assert isinstance(box, BoundingBox3D), f'Can only instantiate an agent snapshot list from a list of BoundingBox3D, not {type(box)}'
        self.boxlist = boxlist
    @classmethod
    def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
        'Loads agent snapshot list from proto into a canonical format for consumption in __getitem__ function in\n        BaseDataset.\n\n        Parameters\n        ----------\n        agent_snapshots_pb2: dgp.proto.agent.AgentsSlice.agent_snapshots or dgp.proto.agent.AgentTrack.agent_snapshots\n            A proto message holding list of agent snapshot.\n\n        ontology: Ontology\n            Ontology for given agent.\n\n        feature_ontology_table: dict\n            A dictionary mapping feature type key(s) to Ontology(s).\n\n        Returns\n        -------\n        AgentSnapshot3DList\n            Agent Snapshot list object instantiated from proto object.\n        '
        boxlist = []
        # Each proto snapshot becomes one BoundingBox3D; raw class IDs are
        # remapped to the ontology's contiguous ID space, and per-feature
        # values are named via the feature ontology for this feature type.
        for agent_snapshot_3d in agent_snapshots_pb2:
            feature_type = agent_snapshot_3d.agent_snapshot_3D.feature_type
            feature_ontology = feature_ontology_table[FEATURE_TYPE_ID_TO_KEY[feature_type]]
            boxlist.append(BoundingBox3D(pose=Pose.load(agent_snapshot_3d.agent_snapshot_3D.box.pose), sizes=np.float32([agent_snapshot_3d.agent_snapshot_3D.box.width, agent_snapshot_3d.agent_snapshot_3D.box.length, agent_snapshot_3d.agent_snapshot_3D.box.height]), class_id=ontology.class_id_to_contiguous_id[agent_snapshot_3d.agent_snapshot_3D.class_id], instance_id=agent_snapshot_3d.agent_snapshot_3D.instance_id, sample_idx=agent_snapshot_3d.slice_id.index, color=ontology.colormap[agent_snapshot_3d.agent_snapshot_3D.class_id], attributes=dict([(feature_ontology.id_to_name[feature_id], feature) for (feature_id, feature) in enumerate(agent_snapshot_3d.agent_snapshot_3D.features)])))
        return cls(ontology, boxlist)
    def __len__(self):
        return len(self.boxlist)
    def __getitem__(self, index):
        'Return a single 3D bounding box'
        return self.boxlist[index]
    def render(self, image, camera, line_thickness=2, font_scale=0.5):
        'Render the 3D boxes in this agents on the image in place\n\n        Parameters\n        ----------\n        image: np.ndarray\n            Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format.\n            Data type is uint8.\n\n        camera: dgp.utils.camera.Camera\n            Camera used to render the bounding box.\n\n        line_thickness: int, optional\n            Thickness of bounding box lines. Default: 2.\n\n        font_scale: float, optional\n            Font scale used in text labels. Default: 0.5.\n\n        Raises\n        ------\n        ValueError\n            Raised if `image` is not a 3-channel uint8 numpy array.\n        TypeError\n            Raised if `camera` is not an instance of Camera.\n        '
        # Validate inputs up front; rendering mutates `image` in place.
        if ((not isinstance(image, np.ndarray)) or (image.dtype != np.uint8) or (len(image.shape) != 3) or (image.shape[2] != 3)):
            raise ValueError('`image` needs to be a 3-channel uint8 numpy array')
        if (not isinstance(camera, Camera)):
            raise TypeError('`camera` should be of type Camera')
        for box in self.boxlist:
            box.render(image, camera, line_thickness=line_thickness, class_name=self._ontology.contiguous_id_to_name[box.class_id], font_scale=font_scale)
    @property
    def poses(self):
        'Get poses for bounding boxes in agent list.'
        return [box.pose for box in self.boxlist]
    @property
    def sizes(self):
        # (N, 3) float32 array of per-box sizes.
        return np.float32([box.sizes for box in self.boxlist])
    @property
    def class_ids(self):
        'Return class ID for each box, with ontology applied:\n        0 is background, class IDs mapped to a contiguous set.\n        '
        return np.int64([box.class_id for box in self.boxlist])
    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]
    @property
    def instance_ids(self):
        # (N,) int64 array of per-box instance IDs.
        return np.int64([box.instance_id for box in self.boxlist])
|
class AgentSnapshotList(ABC):
    'Base agent snapshot list type. All other agent snapshot lists should inherit from this type and implement\n    abstractmethod.\n\n    Parameters\n    ----------\n    ontology: Ontology, default: None\n        Ontology object for the annotation key.\n    '
    def __init__(self, ontology=None):
        # Ontology is optional; when provided it must be a real Ontology.
        if (ontology is not None):
            assert isinstance(ontology, Ontology), 'Invalid ontology!'
        self._ontology = ontology
    @property
    def ontology(self):
        # Read-only access to the ontology given at construction (may be None).
        return self._ontology
    @classmethod
    def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
        'Loads agent snapshot list from proto into a canonical format for consumption in __getitem__ function in\n        BaseDataset.\n        Format/data structure for annotations will vary based on task.\n\n        Parameters\n        ----------\n        agent_snapshots_pb2: object\n            An agent proto message holding agent information.\n\n        ontology: Ontology\n            Ontology for given agent.\n\n        feature_ontology_table: dict, optional\n            A dictionary mapping feature type key(s) to Ontology(s).\n            Default: None.\n        '
        # NOTE(review): unlike `render`, this is not decorated @abstractmethod,
        # so subclasses are not forced to override it and calling the base
        # implementation silently returns None -- confirm this is intended.
    @abstractmethod
    def render(self):
        'Return a rendering of the agent snapshot list. Expected format is a PIL.Image or np.array'
|
class Annotation(ABC):
    'Base annotation type. All other annotations should inherit from this type and implement\n    member functions.\n\n    Parameters\n    ----------\n    ontology: Ontology, default: None\n        Ontology object for the annotation key\n    '
    def __init__(self, ontology=None):
        # Ontology is optional; when provided it must be a real Ontology.
        if (ontology is not None):
            assert isinstance(ontology, Ontology), 'Invalid ontology!'
        self._ontology = ontology
    @property
    def ontology(self):
        # Read-only ontology accessor (may be None for ontology-free annotations).
        return self._ontology
    @classmethod
    @abstractmethod
    def load(cls, annotation_file, ontology):
        'Loads annotation from file into a canonical format for consumption in __getitem__ function in BaseDataset.\n        Format/data structure for annotations will vary based on task.\n\n        Parameters\n        ----------\n        annotation_file: str\n            Full path to annotation\n\n        ontology: Ontology\n            Ontology for given annotation\n        '
    @abstractmethod
    def save(self, save_dir):
        'Serialize annotation object if possible, and saved to specified directory.\n        Annotations are saved in format <save_dir>/<sha>.<ext>\n\n        Parameters\n        ----------\n        save_dir: str\n            Path to directory to saved annotation\n        '
    @abstractmethod
    def render(self):
        'Return a rendering of the annotation. Expected format is a PIL.Image or np.array'
    @property
    @abstractmethod
    def hexdigest(self):
        'Reproducible hash of annotation.'
    def __eq__(self, other):
        # Equality is content-based via the reproducible hash.
        # NOTE(review): __eq__ without __hash__ makes instances unhashable,
        # and `other` is assumed to expose `hexdigest` -- confirm both are
        # intended.
        return (self.hexdigest == other.hexdigest)
    def __repr__(self):
        return f'{self.__class__.__name__}[{os.path.basename(self.hexdigest)}]'
|
class BoundingBox2DAnnotationList(Annotation):
    'Container for 2D bounding box annotations.\n\n    Parameters\n    ----------\n    ontology: BoundingBoxOntology\n        Ontology for 2D bounding box tasks.\n\n    boxlist: list[BoundingBox2D]\n        List of BoundingBox2D objects. See `dgp/utils/structures/bounding_box_2d` for more details.\n    '
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        # Fail fast on ontology/box type mismatches before storing the list.
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load annotation with wrong type of ontology!'
        for box in boxlist:
            assert isinstance(box, BoundingBox2D), f'Can only instantate an annotation from a list of BoundingBox2D, not {type(box)}'
        self.boxlist = boxlist
    @classmethod
    def load(cls, annotation_file, ontology):
        'Load annotation from annotation file and ontology.\n\n        Parameters\n        ----------\n        annotation_file: str or bytes\n            Full path to annotation or bytestring\n\n        ontology: BoundingBoxOntology\n            Ontology for 2D bounding box tasks.\n\n        Returns\n        -------\n        BoundingBox2DAnnotationList\n            Annotation object instantiated from file.\n        '
        # Parse the proto (path or raw bytes); raw class IDs are remapped to
        # the ontology's contiguous ID space.
        _annotation_pb2 = parse_pbobject(annotation_file, BoundingBox2DAnnotations)
        boxlist = [BoundingBox2D(box=np.float32([ann.box.x, ann.box.y, ann.box.w, ann.box.h]), class_id=ontology.class_id_to_contiguous_id[ann.class_id], instance_id=ann.instance_id, color=ontology.colormap[ann.class_id], attributes=getattr(ann, 'attributes', {})) for ann in _annotation_pb2.annotations]
        return cls(ontology, boxlist)
    def to_proto(self):
        'Return annotation as pb object.\n\n        Returns\n        -------\n        BoundingBox2DAnnotations\n            Annotation as defined in `proto/annotations.proto`\n        '
        # Inverse of `load`: contiguous IDs are mapped back to raw class IDs.
        return BoundingBox2DAnnotations(annotations=[BoundingBox2DAnnotation(class_id=self._ontology.contiguous_id_to_class_id[box.class_id], box=box.to_proto(), area=int(box.area), instance_id=box.instance_id, attributes=box.attributes) for box in self.boxlist])
    def save(self, save_dir):
        'Serialize Annotation object and saved to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>\n\n        Parameters\n        ----------\n        save_dir: str\n            Directory in which annotation is saved.\n\n        Returns\n        -------\n        output_annotation_file: str\n            Full path to saved annotation\n        '
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
    def __len__(self):
        return len(self.boxlist)
    def __getitem__(self, index):
        # NOTE(review): docstring said "3D" but this container holds 2D boxes.
        'Return a single 2D bounding box'
        return self.boxlist[index]
    def render(self):
        'TODO: Batch rendering function for bounding boxes.'
        raise NotImplementedError
    @property
    def ltrb(self):
        'Return boxes as (N, 4) np.ndarray in format ([left, top, right, bottom])'
        return np.array([box.ltrb for box in self.boxlist], dtype=np.float32)
    @property
    def ltwh(self):
        'Return boxes as (N, 4) np.ndarray in format ([left, top, width, height])'
        return np.array([box.ltwh for box in self.boxlist], dtype=np.float32)
    @property
    def class_ids(self):
        'Return class ID for each box, with ontology applied:\n        0 is background, class IDs mapped to a contiguous set.\n        '
        return np.array([box.class_id for box in self.boxlist], dtype=np.int64)
    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]
    @property
    def instance_ids(self):
        # (N,) int64 array of per-box instance IDs.
        return np.array([box.instance_id for box in self.boxlist], dtype=np.int64)
    @property
    def hexdigest(self):
        'Reproducible hash of annotation.'
        return generate_uid_from_pbobject(self.to_proto())
|
class BoundingBox3DAnnotationList(Annotation):
    'Container for 3D bounding box annotations.\n\n    Parameters\n    ----------\n    ontology: BoundingBoxOntology\n        Ontology for 3D bounding box tasks.\n\n    boxlist: list[BoundingBox3D]\n        List of BoundingBox3D objects. See `utils/structures/bounding_box_3d`\n        for more details.\n    '
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        # Fail fast on ontology/box type mismatches before storing the list.
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load annotation with wrong type of ontology!'
        for box in boxlist:
            assert isinstance(box, BoundingBox3D), f'Can only instantiate an annotation from a list of BoundingBox3D, not {type(box)}'
        self.boxlist = boxlist
    @classmethod
    def load(cls, annotation_file, ontology):
        'Load annotation from annotation file and ontology.\n\n        Parameters\n        ----------\n        annotation_file: str or bytes\n            Full path to annotation or bytestring\n\n        ontology: BoundingBoxOntology\n            Ontology for 3D bounding box tasks.\n\n        Returns\n        -------\n        BoundingBox3DAnnotationList\n            Annotation object instantiated from file.\n        '
        # Parse the proto (path or raw bytes); raw class IDs are remapped to
        # the ontology's contiguous ID space.
        _annotation_pb2 = parse_pbobject(annotation_file, BoundingBox3DAnnotations)
        boxlist = [BoundingBox3D(pose=Pose.load(ann.box.pose), sizes=np.float32([ann.box.width, ann.box.length, ann.box.height]), class_id=ontology.class_id_to_contiguous_id[ann.class_id], instance_id=ann.instance_id, color=ontology.colormap[ann.class_id], attributes=getattr(ann, 'attributes', {}), num_points=ann.num_points, occlusion=ann.box.occlusion, truncation=ann.box.truncation) for ann in _annotation_pb2.annotations]
        return cls(ontology, boxlist)
    def to_proto(self):
        'Return annotation as pb object.\n\n        Returns\n        -------\n        BoundingBox3DAnnotations\n            Annotation as defined `proto/annotations.proto`\n        '
        # Inverse of `load`: contiguous IDs are mapped back to raw class IDs.
        return BoundingBox3DAnnotations(annotations=[BoundingBox3DAnnotation(class_id=self._ontology.contiguous_id_to_class_id[box.class_id], box=box.to_proto(), instance_id=box.instance_id, attributes=box.attributes, num_points=box.num_points) for box in self.boxlist])
    def save(self, save_dir):
        'Serialize Annotation object and saved to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>\n\n        Parameters\n        ----------\n        save_dir: str\n            A pathname to a directory to save the annotation object into.\n\n        Returns\n        -------\n        output_annotation_file: str\n            Full path to saved annotation\n        '
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
    def __len__(self):
        return len(self.boxlist)
    def __getitem__(self, index):
        'Return a single 3D bounding box'
        return self.boxlist[index]
    def render(self, image, camera, line_thickness=2, font_scale=0.5):
        'Render the 3D boxes in this annotation on the image in place\n\n        Parameters\n        ----------\n        image: np.uint8\n            Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format.\n            Element type must be uint8.\n\n        camera: dgp.utils.camera.Camera\n            Camera used to render the bounding box.\n\n        line_thickness: int, optional\n            Thickness of bounding box lines. Default: 2.\n\n        font_scale: float, optional\n            Font scale used in text labels. Default: 0.5.\n\n        Raises\n        ------\n        ValueError\n            Raised if image is not a 3-channel uint8 numpy array.\n        TypeError\n            Raised if camera is not an instance of Camera.\n        '
        # Validate inputs up front; rendering mutates `image` in place.
        if ((not isinstance(image, np.ndarray)) or (image.dtype != np.uint8) or (len(image.shape) != 3) or (image.shape[2] != 3)):
            raise ValueError('`image` needs to be a 3-channel uint8 numpy array')
        if (not isinstance(camera, Camera)):
            raise TypeError('`camera` should be of type Camera')
        for box in self.boxlist:
            box.render(image, camera, line_thickness=line_thickness, class_name=self._ontology.contiguous_id_to_name[box.class_id], font_scale=font_scale)
    @property
    def poses(self):
        'Get poses for bounding boxes in annotation.'
        return [box.pose for box in self.boxlist]
    @property
    def sizes(self):
        # (N, 3) float32 array of per-box sizes.
        return np.float32([box.sizes for box in self.boxlist])
    @property
    def class_ids(self):
        'Return class ID for each box, with ontology applied:\n        0 is background, class IDs mapped to a contiguous set.\n        '
        return np.int64([box.class_id for box in self.boxlist])
    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]
    @property
    def instance_ids(self):
        # (N,) int64 array of per-box instance IDs.
        return np.int64([box.instance_id for box in self.boxlist])
    @property
    def hexdigest(self):
        'Reproducible hash of annotation.'
        return generate_uid_from_pbobject(self.to_proto())
    def project(self, camera):
        'Project bounding boxes into a camera and get back 2D bounding boxes in the frustum.\n\n        Parameters\n        ----------\n        camera: Camera\n            The Camera instance to project into.\n\n        Raises\n        ------\n        NotImplementedError\n            Unconditionally.\n        '
        raise NotImplementedError
|
class DenseDepthAnnotation(Annotation):
    """Container for per-pixel depth annotation.

    Parameters
    ----------
    depth: np.ndarray
        2D numpy float array that stores per-pixel depth.
    """
    def __init__(self, depth):
        assert isinstance(depth, np.ndarray)
        assert (depth.dtype in [np.float32, np.float64])
        super().__init__(None)
        self._depth = depth

    @property
    def depth(self):
        """Per-pixel depth array."""
        return self._depth

    @classmethod
    def load(cls, annotation_file, ontology=None):
        """Load a depth annotation from an NPZ file.

        Parameters
        ----------
        annotation_file: str
            Full path to an NPZ file storing the 2D depth array under 'data'.

        ontology: None
            Dummy ontology argument to meet the usage in
            `BaseDataset.load_annotation()`; must be None.
        """
        assert (ontology is None), "'ontology' must be 'None' for {}.".format(cls.__name__)
        array = np.load(annotation_file)['data']
        return cls(array)

    @property
    def hexdigest(self):
        """Reproducible hash of the depth annotation."""
        return generate_uid_from_point_cloud(self.depth)

    def save(self, save_dir):
        """Save the depth map as <save_dir>/<sha>.npz.

        Parameters
        ----------
        save_dir: str
            Path to directory to save the annotation into.

        Returns
        -------
        pointcloud_path: str
            Full path to the output NPZ file.
        """
        out_path = os.path.join(save_dir, '{}.npz'.format(self.hexdigest))
        np.savez_compressed(out_path, data=self.depth)
        return out_path

    def render(self):
        """TODO: Rendering function for per-pixel depth."""
|
class KeyLine2DAnnotationList(Annotation):
    'Container for 2D keyline annotations.\n\n    Parameters\n    ----------\n    ontology: KeyLineOntology\n        Ontology for 2D keyline tasks.\n\n    linelist: list[KeyLine2D]\n        List of KeyLine2D objects. See `dgp/utils/structures/key_line_2d` for more details.\n    '
    def __init__(self, ontology, linelist):
        super().__init__(ontology)
        # Fail fast on ontology/line type mismatches before storing the list.
        assert isinstance(self._ontology, KeyLineOntology), 'Trying to load annotation with wrong type of ontology!'
        for line in linelist:
            assert isinstance(line, KeyLine2D), f'Can only instantate an annotation from a list of KeyLine2D, not {type(line)}'
        self.linelist = linelist
    @classmethod
    def load(cls, annotation_file, ontology):
        'Load annotation from annotation file and ontology.\n\n        Parameters\n        ----------\n        annotation_file: str or bytes\n            Full path to annotation or bytestring\n\n        ontology: KeyLineOntology\n            Ontology for 2D keyline tasks.\n\n        Returns\n        -------\n        KeyLine2DAnnotationList\n            Annotation object instantiated from file.\n        '
        # Each proto annotation holds a polyline as a list of (x, y) vertices.
        _annotation_pb2 = parse_pbobject(annotation_file, KeyLine2DAnnotations)
        linelist = [KeyLine2D(line=np.float32([[vertex.x, vertex.y] for vertex in ann.vertices]), class_id=ontology.class_id_to_contiguous_id[ann.class_id], color=ontology.colormap[ann.class_id], attributes=getattr(ann, 'attributes', {})) for ann in _annotation_pb2.annotations]
        return cls(ontology, linelist)
    def to_proto(self):
        'Return annotation as pb object.\n\n        Returns\n        -------\n        KeyLine2DAnnotations\n            Annotation as defined in `proto/annotations.proto`\n        '
        # Vertices are re-emitted as KeyPoint2D protos carrying the line's
        # class/instance/color/attributes.
        return KeyLine2DAnnotations(annotations=[KeyLine2DAnnotation(class_id=self._ontology.contiguous_id_to_class_id[line.class_id], vertices=[KeyPoint2D(point=np.float32([x, y]), class_id=line.class_id, instance_id=line.instance_id, color=line.color, attributes=line.attributes).to_proto() for (x, y) in zip(line.x, line.y)], attributes=line.attributes) for line in self.linelist])
    def save(self, save_dir):
        'Serialize Annotation object and saved to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>\n\n        Parameters\n        ----------\n        save_dir: str\n            Directory in which annotation is saved.\n\n        Returns\n        -------\n        output_annotation_file: str\n            Full path to saved annotation.\n        '
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
    def __len__(self):
        return len(self.linelist)
    def __getitem__(self, index):
        'Return a single 2D keyline'
        return self.linelist[index]
    def render(self):
        'TODO: Batch rendering function for keylines.'
        raise NotImplementedError
    @property
    def xy(self):
        'Return lines as (N, 2) np.ndarray in format ([x, y])'
        return np.array([line.xy.tolist() for line in self.linelist], dtype=np.float32)
    @property
    def class_ids(self):
        'Return class ID for each line, with ontology applied:\n        class IDs mapped to a contiguous set.\n        '
        return np.array([line.class_id for line in self.linelist], dtype=np.int64)
    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [line.attributes for line in self.linelist]
    @property
    def instance_ids(self):
        # (N,) int64 array of per-line instance IDs.
        return np.array([line.instance_id for line in self.linelist], dtype=np.int64)
    @property
    def hexdigest(self):
        'Reproducible hash of annotation.'
        return generate_uid_from_pbobject(self.to_proto())
|
class KeyLine3DAnnotationList(Annotation):
'Container for 3D keyline annotations.\n\n Parameters\n ----------\n ontology: KeyLineOntology\n Ontology for 3D keyline tasks.\n\n linelist: list[KeyLine3D]\n List of KeyLine3D objects. See `dgp/utils/structures/key_line_3d` for more details.\n '
def __init__(self, ontology, linelist):
super().__init__(ontology)
assert isinstance(self._ontology, KeyLineOntology), 'Trying to load annotation with wrong type of ontology!'
for line in linelist:
assert isinstance(line, KeyLine3D), f'Can only instantate an annotation from a list of KeyLine3D, not {type(line)}'
self._linelist = linelist
@classmethod
def load(cls, annotation_file, ontology):
'Load annotation from annotation file and ontology.\n\n Parameters\n ----------\n annotation_file: str or bytes\n Full path to annotation or bytestring\n\n ontology: KeyLineOntology\n Ontology for 3D keyline tasks.\n\n Returns\n -------\n KeyLine3DAnnotationList\n Annotation object instantiated from file.\n '
_annotation_pb2 = parse_pbobject(annotation_file, KeyLine3DAnnotations)
linelist = [KeyLine3D(line=np.float32([[vertex.x, vertex.y, vertex.z] for vertex in ann.vertices]), class_id=ontology.class_id_to_contiguous_id[ann.class_id], color=ontology.colormap[ann.class_id], attributes=getattr(ann, 'attributes', {})) for ann in _annotation_pb2.annotations]
return cls(ontology, linelist)
def to_proto(self):
'Return annotation as pb object.\n\n Returns\n -------\n KeyLine3DAnnotations\n Annotation as defined in `proto/annotations.proto`\n '
return KeyLine3DAnnotations(annotations=[KeyLine3DAnnotation(class_id=self._ontology.contiguous_id_to_class_id[line.class_id], vertices=[KeyPoint3D(point=np.float32([x, y, z]), class_id=line.class_id, instance_id=line.instance_id, color=line.color, attributes=line.attributes).to_proto() for (x, y, z) in zip(line.x, line.y, line.z)], attributes=line.attributes) for line in self._linelist])
def save(self, save_dir):
'Serialize Annotation object and saved to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>\n\n Parameters\n ----------\n save_dir: str\n Directory in which annotation is saved.\n\n Returns\n -------\n output_annotation_file: str\n Full path to saved annotation.\n '
return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
def __len__(self):
return len(self._linelist)
def __getitem__(self, index):
'Return a single 3D keyline'
return self._linelist[index]
    def render(self):
        'Batch rendering function for keylines.'
        # Rendering of 3D keylines is not implemented yet.
        raise NotImplementedError
@property
def xyz(self):
'Return lines as (N, 3) np.ndarray in format ([x, y, z])'
return np.array([line.xyz.tolist() for line in self._linelist], dtype=np.float32)
@property
def class_ids(self):
'Return class ID for each line, with ontology applied:\n class IDs mapped to a contiguous set.\n '
return np.array([line.class_id for line in self._linelist], dtype=np.int64)
@property
def attributes(self):
'Return a list of dictionaries of attribute name to value.'
return [line.attributes for line in self._linelist]
@property
def instance_ids(self):
return np.array([line.instance_id for line in self._linelist], dtype=np.int64)
    @property
    def hexdigest(self):
        'Reproducible hash of annotation.'
        # Digest is derived from the serialized proto, so identical annotations
        # produce the same hash.
        return generate_uid_from_pbobject(self.to_proto())
|
class KeyPoint2DAnnotationList(Annotation):
    """Container for 2D keypoint annotations.

    Parameters
    ----------
    ontology: KeyPointOntology
        Ontology for 2D keypoint tasks.

    pointlist: list[KeyPoint2D]
        List of KeyPoint2D objects. See `dgp/utils/structures/key_point_2d` for more details.
    """
    def __init__(self, ontology, pointlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyPointOntology), 'Trying to load annotation with wrong type of ontology!'
        for point in pointlist:
            # Fixed typo in the error message: 'instantate' -> 'instantiate'.
            assert isinstance(point, KeyPoint2D), f'Can only instantiate an annotation from a list of KeyPoint2D, not {type(point)}'
        # NOTE(review): kept public (`pointlist`) for backward compatibility even though
        # the sibling containers use a private `_pointlist`/`_linelist` attribute.
        self.pointlist = pointlist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: KeyPointOntology
            Ontology for 2D keypoint tasks.

        Returns
        -------
        KeyPoint2DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, KeyPoint2DAnnotations)
        # Convert each proto annotation to a KeyPoint2D with the contiguous class id,
        # the class color, and any proto attributes.
        pointlist = [
            KeyPoint2D(
                point=np.float32([ann.point.x, ann.point.y]),
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {})
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, pointlist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        KeyPoint2DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        # Map contiguous class ids back to original ontology ids when serializing.
        return KeyPoint2DAnnotations(
            annotations=[
                KeyPoint2DAnnotation(
                    class_id=self._ontology.contiguous_id_to_class_id[point.class_id],
                    point=point.to_proto(),
                    attributes=point.attributes
                ) for point in self.pointlist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and save to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self.pointlist)

    def __getitem__(self, index):
        """Return a single 2D keypoint"""
        return self.pointlist[index]

    def render(self):
        """TODO: Batch rendering function for keypoints."""
        raise NotImplementedError

    @property
    def xy(self):
        """Return points as (N, 2) np.ndarray in format ([x, y])"""
        return np.array([point.xy for point in self.pointlist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each point, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.array([point.class_id for point in self.pointlist], dtype=np.int64)

    @property
    def attributes(self):
        """Return a list of dictionaries of attribute name to value."""
        return [point.attributes for point in self.pointlist]

    @property
    def instance_ids(self):
        # Per-point instance IDs as an (N,) int64 array.
        return np.array([point.instance_id for point in self.pointlist], dtype=np.int64)

    @property
    def hexdigest(self):
        """Reproducible hash of annotation."""
        return generate_uid_from_pbobject(self.to_proto())
|
class KeyPoint3DAnnotationList(Annotation):
    """Container for 3D keypoint annotations.

    Parameters
    ----------
    ontology: KeyPointOntology
        Ontology for 3D keypoint tasks.

    pointlist: list[KeyPoint3D]
        List of KeyPoint3D objects. See `dgp/utils/structures/key_point_3d` for more details.
    """
    def __init__(self, ontology, pointlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyPointOntology), 'Trying to load annotation with wrong type of ontology!'
        for point in pointlist:
            # Fixed typo in the error message: 'instantate' -> 'instantiate'.
            assert isinstance(point, KeyPoint3D), f'Can only instantiate an annotation from a list of KeyPoint3D, not {type(point)}'
        self._pointlist = pointlist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: KeyPointOntology
            Ontology for 3D keypoint tasks.

        Returns
        -------
        KeyPoint3DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, KeyPoint3DAnnotations)
        # Convert each proto annotation to a KeyPoint3D with the contiguous class id,
        # the class color, and any proto attributes.
        pointlist = [
            KeyPoint3D(
                point=np.float32([ann.point.x, ann.point.y, ann.point.z]),
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {})
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, pointlist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        KeyPoint3DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        # Map contiguous class ids back to original ontology ids when serializing.
        return KeyPoint3DAnnotations(
            annotations=[
                KeyPoint3DAnnotation(
                    class_id=self._ontology.contiguous_id_to_class_id[point.class_id],
                    point=point.to_proto(),
                    attributes=point.attributes
                ) for point in self._pointlist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and save to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self._pointlist)

    def __getitem__(self, index):
        """Return a single 3D keypoint"""
        return self._pointlist[index]

    def render(self):
        """Batch rendering function for keypoints."""
        raise NotImplementedError

    @property
    def xyz(self):
        """Return points as (N, 3) np.ndarray in format ([x, y, z])"""
        return np.array([point.xyz for point in self._pointlist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each point, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.array([point.class_id for point in self._pointlist], dtype=np.int64)

    @property
    def attributes(self):
        """Return a list of dictionaries of attribute name to value."""
        return [point.attributes for point in self._pointlist]

    @property
    def instance_ids(self):
        # Per-point instance IDs as an (N,) int64 array.
        return np.array([point.instance_id for point in self._pointlist], dtype=np.int64)

    @property
    def hexdigest(self):
        """Reproducible hash of annotation."""
        return generate_uid_from_pbobject(self.to_proto())
|
class Ontology():
    'Ontology object. At bare minimum, we expect ontologies to provide:\n    ID: (int) identifier for class\n    Name: (str) string identifier for class\n    Color: (tuple) color RGB tuple\n\n    Based on the task, additional fields may be populated. Refer to `dataset.proto` and `ontology.proto`\n    specifications for more details. Can be constructed from file or from deserialized proto object.\n\n    Parameters\n    ----------\n    ontology_pb2: [OntologyV1Pb2,OntologyV2Pb2]\n    Deserialized ontology object.\n    '
    # Sentinel id/name for the "unlabeled" class used by remapping utilities.
    VOID_ID = 255
    VOID_CLASS = 'Void'
    def __init__(self, ontology_pb2):
        self._ontology = ontology_pb2
        # V1 ontologies store name<->id maps and colormap as separate proto maps.
        if isinstance(self._ontology, OntologyV1Pb2):
            # Sorting keeps iteration order deterministic across runs.
            self._name_to_id = OrderedDict(sorted(self._ontology.name_to_id.items()))
            self._id_to_name = OrderedDict(sorted(self._ontology.id_to_name.items()))
            self._colormap = OrderedDict(sorted([(_id, (_color.r, _color.g, _color.b)) for (_id, _color) in self._ontology.colormap.items()]))
            self._isthing = OrderedDict(sorted(self._ontology.isthing.items()))
        # V2 ontologies store one repeated `items` field with all per-class data.
        elif isinstance(self._ontology, OntologyV2Pb2):
            self._name_to_id = OrderedDict(sorted([(ontology_item.name, ontology_item.id) for ontology_item in self._ontology.items]))
            self._id_to_name = OrderedDict(sorted([(ontology_item.id, ontology_item.name) for ontology_item in self._ontology.items]))
            self._colormap = OrderedDict(sorted([(ontology_item.id, (ontology_item.color.r, ontology_item.color.g, ontology_item.color.b)) for ontology_item in self._ontology.items]))
            self._isthing = OrderedDict(sorted([(ontology_item.id, ontology_item.isthing) for ontology_item in self._ontology.items]))
        else:
            raise TypeError('Unexpected type {}, expected OntologyV1 or OntologyV2'.format(type(self._ontology)))
        # Class ids sorted ascending; names follow the same order.
        self._class_ids = sorted(self._id_to_name.keys())
        self._class_names = [self._id_to_name[c_id] for c_id in self._class_ids]
    @classmethod
    def load(cls, ontology_file):
        'Construct an ontology from an ontology JSON.\n\n    Parameters\n    ----------\n    ontology_file: str\n        Path to ontology JSON\n\n    Raises\n    ------\n    FileNotFoundError\n        Raised if ontology_file does not exist.\n    Exception\n        Raised if we could not open the ontology file for some reason.\n    '
        if os.path.exists(ontology_file):
            ontology_pb2 = open_ontology_pbobject(ontology_file)
        else:
            raise FileNotFoundError('Could not find {}'.format(ontology_file))
        # `open_ontology_pbobject` returns None on parse failure.
        if (ontology_pb2 is not None):
            return cls(ontology_pb2)
        raise Exception('Could not open ontology {}'.format(ontology_file))
    def to_proto(self):
        'Serialize ontology. Only supports exporting in OntologyV2.\n\n    Returns\n    -------\n    OntologyV2Pb2\n        Serialized ontology\n    '
        return OntologyV2Pb2(items=[OntologyItem(name=name, id=class_id, color=OntologyItem.Color(r=self._colormap[class_id][0], g=self._colormap[class_id][1], b=self._colormap[class_id][2]), isthing=self._isthing[class_id]) for (class_id, name) in self._id_to_name.items()])
    def save(self, save_dir):
        'Write out ontology items to `<sha>.json`. SHA generated from Ontology proto object.\n\n    Parameters\n    ----------\n    save_dir: str\n        Directory in which to save serialized ontology.\n\n    Returns\n    -------\n    output_ontology_file: str\n        Path to serialized ontology file.\n    '
        os.makedirs(save_dir, exist_ok=True)
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
    @property
    def num_classes(self):
        # Total number of classes in the ontology.
        return len(self._class_ids)
    @property
    def class_names(self):
        # Class names, ordered to match `class_ids`.
        return self._class_names
    @property
    def class_ids(self):
        # Sorted list of raw (non-contiguous) class ids.
        return self._class_ids
    @property
    def name_to_id(self):
        return self._name_to_id
    @property
    def id_to_name(self):
        return self._id_to_name
    @property
    def colormap(self):
        # Mapping class id -> (r, g, b) tuple.
        return self._colormap
    @property
    def isthing(self):
        # Mapping class id -> bool ("thing" = countable instance class).
        return self._isthing
    @property
    def hexdigest(self):
        'Hash object'
        return generate_uid_from_pbobject(self.to_proto())
    def __eq__(self, other):
        # NOTE(review): assumes `other` is also an Ontology; comparing against an
        # arbitrary object raises AttributeError rather than returning NotImplemented.
        return (self.hexdigest == other.hexdigest)
    def __repr__(self):
        return '{}[{}]'.format(self.__class__.__name__, os.path.basename(self.hexdigest))
|
class BoundingBoxOntology(Ontology):
    'Implements lookup tables specific to 2D bounding box tasks.\n\n    Parameters\n    ----------\n    ontology_pb2: [OntologyV1Pb2,OntologyV2Pb2]\n    Deserialized ontology object.\n    '
    def __init__(self, ontology_pb2):
        super().__init__(ontology_pb2)
        # Only "thing" (instance) classes participate in detection tasks.
        self._thing_class_ids = [class_id for (class_id, isthing) in self._isthing.items() if isthing]
        # Contiguous ids start at 1: id 0 is implicitly reserved for background.
        self._class_id_to_contiguous_id = OrderedDict(((class_id, (contiguous_id + 1)) for (contiguous_id, class_id) in enumerate(self._thing_class_ids)))
        self._contiguous_id_to_class_id = OrderedDict(((contiguous_id, class_id) for (class_id, contiguous_id) in self._class_id_to_contiguous_id.items()))
        self._contiguous_id_to_name = OrderedDict(((contiguous_id, self._id_to_name[class_id]) for (contiguous_id, class_id) in self._contiguous_id_to_class_id.items()))
        self._name_to_contiguous_id = OrderedDict(((name, contiguous_id) for (contiguous_id, name) in self._contiguous_id_to_name.items()))
        self._contiguous_id_colormap = OrderedDict(((contiguous_id, self._colormap[class_id]) for (contiguous_id, class_id) in self._contiguous_id_to_class_id.items()))
        # Overrides base class: names restricted to thing classes only.
        self._class_names = [self._id_to_name[c_id] for c_id in self._thing_class_ids]
    @property
    def num_classes(self):
        # Number of thing classes (background excluded).
        return len(self._thing_class_ids)
    @property
    def class_names(self):
        return self._class_names
    @property
    def thing_class_ids(self):
        return self._thing_class_ids
    @property
    def class_id_to_contiguous_id(self):
        # Raw class id -> contiguous id (1-based; 0 reserved for background).
        return self._class_id_to_contiguous_id
    @property
    def contiguous_id_to_class_id(self):
        return self._contiguous_id_to_class_id
    @property
    def contiguous_id_to_name(self):
        return self._contiguous_id_to_name
    @property
    def name_to_contiguous_id(self):
        return self._name_to_contiguous_id
    @property
    def contiguous_id_colormap(self):
        return self._contiguous_id_colormap
|
class AgentBehaviorOntology(BoundingBoxOntology):
    """Agent behavior ontologies derive directly from bounding box ontologies."""
|
class KeyPointOntology(BoundingBoxOntology):
    """Keypoint ontologies derive directly from bounding box ontologies."""
|
class KeyLineOntology(BoundingBoxOntology):
    """Keyline ontologies derive directly from bounding box ontologies."""
|
class InstanceSegmentationOntology(BoundingBoxOntology):
    """Instance segmentation ontologies derive directly from bounding box ontologies."""
|
class SemanticSegmentationOntology(Ontology):
    'Implements lookup tables for semantic segmentation\n\n    Parameters\n    ----------\n    ontology_pb2: [OntologyV1Pb2,OntologyV2Pb2]\n    Deserialized ontology object.\n    '
    def __init__(self, ontology_pb2):
        super().__init__(ontology_pb2)
        # Unlike BoundingBoxOntology, contiguous ids start at 0 and cover ALL classes
        # (semantic segmentation has no separate background id).
        self._class_id_to_contiguous_id = OrderedDict(((class_id, contiguous_id) for (contiguous_id, class_id) in enumerate(self._class_ids)))
        self._contiguous_id_to_class_id = OrderedDict(((contiguous_id, class_id) for (class_id, contiguous_id) in self._class_id_to_contiguous_id.items()))
        self._contiguous_id_to_name = OrderedDict(((contiguous_id, self._id_to_name[class_id]) for (contiguous_id, class_id) in self._contiguous_id_to_class_id.items()))
        self._name_to_contiguous_id = OrderedDict(((name, contiguous_id) for (contiguous_id, name) in self._contiguous_id_to_name.items()))
        self._contiguous_id_colormap = OrderedDict(((contiguous_id, self._colormap[class_id]) for (contiguous_id, class_id) in self._contiguous_id_to_class_id.items()))
        # Dense lookup array mapping raw class id -> contiguous id; ids not in the
        # ontology map to VOID_ID (255). NOTE(review): dtype is uint8, so this assumes
        # all contiguous ids and class ids fit in 0..255 — confirm for large ontologies.
        self._label_lookup = (np.ones((max(self.class_ids) + 1), dtype=np.uint8) * self.VOID_ID)
        for (class_id, contiguous_id) in self._class_id_to_contiguous_id.items():
            self._label_lookup[class_id] = contiguous_id
    @property
    def label_lookup(self):
        # (max_class_id + 1,) uint8 array for vectorized label remapping.
        return self._label_lookup
    @property
    def class_id_to_contiguous_id(self):
        return self._class_id_to_contiguous_id
    @property
    def contiguous_id_to_class_id(self):
        return self._contiguous_id_to_class_id
    @property
    def contiguous_id_to_name(self):
        return self._contiguous_id_to_name
    @property
    def name_to_contiguous_id(self):
        return self._name_to_contiguous_id
    @property
    def contiguous_id_colormap(self):
        return self._contiguous_id_colormap
|
def remap_bounding_box_annotations(bounding_box_annotations, lookup_table, original_ontology, remapped_ontology):
    """Remap bounding box annotations from one ontology to another.

    Boxes whose original class name is absent from `lookup_table` are dropped.
    NOTE: boxes are remapped in place — the surviving box objects in the returned
    annotation are the same objects as in the input, with `class_id` rewritten.

    Parameters
    ----------
    bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Annotations to remap

    lookup_table: dict
        Lookup from old class names to new class names, e.g.
        {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}

    original_ontology: BoundingBoxOntology
        Ontology we are remapping annotations from

    remapped_ontology: BoundingBoxOntology
        Ontology we are mapping annotations to

    Returns
    -------
    remapped_bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Remapped annotations with the same type as bounding_box_annotations
    """
    assert (isinstance(original_ontology, BoundingBoxOntology) and isinstance(remapped_ontology, BoundingBoxOntology))
    kept_boxes = []
    for box in bounding_box_annotations:
        class_name = original_ontology.contiguous_id_to_name[box.class_id]
        if class_name not in lookup_table:
            continue
        box.class_id = remapped_ontology.name_to_contiguous_id[lookup_table[class_name]]
        kept_boxes.append(box)
    return type(bounding_box_annotations)(remapped_ontology, kept_boxes)
|
def remap_semantic_segmentation_2d_annotation(semantic_segmentation_annotation, lookup_table, original_ontology, remapped_ontology):
    """Remap a 2D semantic segmentation annotation from one ontology to another.

    Pixels whose original class name is absent from `lookup_table` are set to
    `Ontology.VOID_ID`.

    Parameters
    ----------
    semantic_segmentation_annotation: SemanticSegmentation2DAnnotation
        Annotation to remap

    lookup_table: dict
        Lookup from old class names to new class names, e.g.
        {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}

    original_ontology: SemanticSegmentationOntology
        Ontology we are remapping annotation from

    remapped_ontology: SemanticSegmentationOntology
        Ontology we are mapping annotation to

    Returns
    -------
    remapped_semantic_segmentation_2d_annotation: SemanticSegmentation2DAnnotation
        Remapped annotation
    """
    assert (isinstance(original_ontology, SemanticSegmentationOntology) and isinstance(remapped_ontology, SemanticSegmentationOntology))
    source_label = semantic_segmentation_annotation.label
    # Start from an all-VOID canvas; only explicitly remapped classes get written.
    target_label = np.full_like(source_label, Ontology.VOID_ID)
    for old_name, new_name in lookup_table.items():
        source_id = original_ontology.name_to_contiguous_id[old_name]
        target_label[source_label == source_id] = remapped_ontology.name_to_contiguous_id[new_name]
    return SemanticSegmentation2DAnnotation(remapped_ontology, target_label)
|
def remap_instance_segmentation_2d_annotation(instance_segmentation_annotation, lookup_table, original_ontology, remapped_ontology):
    """Remap a 2D instance segmentation annotation from one ontology to another.

    Masks whose original class name is absent from `lookup_table` are dropped.
    NOTE: surviving masks are remapped in place (their `class_id` is rewritten).

    Parameters
    ----------
    instance_segmentation_annotation: PanopticSegmentation2DAnnotation
        Annotation to remap

    lookup_table: dict
        Lookup from old class names to new class names, e.g.
        {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}

    original_ontology: InstanceSegmentationOntology
        Ontology we are remapping annotation from

    remapped_ontology: InstanceSegmentationOntology
        Ontology we are mapping annotation to

    Returns
    -------
    PanopticSegmentation2DAnnotation:
        Remapped annotation
    """
    assert (isinstance(original_ontology, InstanceSegmentationOntology) and isinstance(remapped_ontology, InstanceSegmentationOntology))
    kept_masks = []
    for mask in instance_segmentation_annotation:
        class_name = original_ontology.contiguous_id_to_name[mask.class_id]
        if class_name not in lookup_table:
            continue
        mask.class_id = remapped_ontology.name_to_contiguous_id[lookup_table[class_name]]
        kept_masks.append(mask)
    assert isinstance(instance_segmentation_annotation, PanopticSegmentation2DAnnotation)
    return PanopticSegmentation2DAnnotation.from_masklist(kept_masks, remapped_ontology, instance_segmentation_annotation.panoptic_image.shape, instance_segmentation_annotation.panoptic_image_dtype)
|
def construct_remapped_ontology(ontology, lookup, annotation_key):
    "Given an Ontology object and a lookup from old class names to new class names, construct\n    an ontology proto for the new ontology that results\n\n    Parameters\n    ----------\n    ontology: dgp.annotations.Ontology\n        Ontology we are trying to remap using `lookup`\n        eg. ontology.id_to_name = {0: 'Car', 1: 'Truck', 2: 'Motrocycle'}\n\n    lookup: dict\n        Lookup from old class names to new class names\n        e.g.:\n        {\n            'Car': 'Car',\n            'Truck': 'Car',\n            'Motorcycle': 'Motorcycle'\n        }\n\n        NOTE: `lookup` needs to be exhaustive; any classes that the user wants to have in returned\n        ontology need to be remapped explicitly\n\n    annotation_key: str\n        Annotation key of Ontology\n        e.g. `bounding_box_2d`\n\n    Returns\n    -------\n    remapped_ontology_pb2: dgp.proto.ontology_pb2.Ontology\n        Ontology defined by applying `lookup` on original `ontology`\n\n        NOTE: This is constructed by iterating over class names in `lookup.keys()` in\n        alphabetical order, so if both 'Car' and 'Motorcycle' get remapped to 'DynamicObject', the\n        color for 'DynamicObject' will be the original color for 'Car'\n\n        Any class names not in `lookup` are dropped\n\n    Notes\n    -----\n    This could be a class function of `Ontology`\n    "
    assert isinstance(ontology, Ontology), f'Expected Ontology, got {type(ontology)}'
    # Group original class names by the remapped class they map to; remapped class
    # order follows first appearance in `lookup`.
    remapped_class_name_to_original_class_names = OrderedDict()
    for (class_name, remapped_class_name) in lookup.items():
        if (remapped_class_name not in remapped_class_name_to_original_class_names):
            remapped_class_name_to_original_class_names[remapped_class_name] = []
        remapped_class_name_to_original_class_names[remapped_class_name].append(class_name)
    # Sort the originals within each group so color selection below is deterministic
    # (alphabetically-first original class donates its color).
    remapped_class_name_to_original_class_names = {k: sorted(v) for (k, v) in remapped_class_name_to_original_class_names.items()}
    remapped_ontology_pb2 = OntologyPB2()
    for (remapped_class_id, (remapped_class_name, original_class_names)) in enumerate(remapped_class_name_to_original_class_names.items()):
        original_class_ids = [ontology.name_to_id[class_name] for class_name in original_class_names]
        isthing = [ontology.isthing[class_id] for class_id in original_class_ids]
        if (annotation_key == 'semantic_segmentation_2d'):
            # Semantic segmentation treats every class as "stuff".
            isthing = False
        else:
            # Merged classes must agree on thing/stuff-ness to keep the ontology valid.
            assert (len(set(isthing)) == 1), 'Classes mapping to the same class are either all things or all stuff'
            isthing = isthing[0]
        remapped_class_color = ontology.colormap[original_class_ids[0]]
        remapped_ontology_pb2.items.extend([OntologyItem(name=remapped_class_name, id=remapped_class_id, isthing=isthing, color=OntologyItem.Color(r=remapped_class_color[0], g=remapped_class_color[1], b=remapped_class_color[2]))])
    # Semantic segmentation always needs a Void class to absorb unmapped pixels.
    if ((annotation_key == 'semantic_segmentation_2d') and (not (Ontology.VOID_CLASS in remapped_class_name_to_original_class_names))):
        remapped_ontology_pb2.items.extend([OntologyItem(name=Ontology.VOID_CLASS, id=Ontology.VOID_ID, isthing=False, color=OntologyItem.Color(r=0, g=0, b=0))])
    return remapped_ontology_pb2
|
class Compose():
    """Composes several transforms together.

    Parameters
    ----------
    transforms
        List of transforms to compose; each has a __call__ taking an OrderedDict.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """
    def __init__(self, transforms):
        # Reject the whole list if any member is not a BaseTransform.
        for transform in transforms:
            if not isinstance(transform, BaseTransform):
                raise TypeError('All transforms used in Compose should inherit from `BaseTransform`')
        self.transforms = transforms

    def __call__(self, data):
        """Apply each transform in order, feeding each one's output to the next."""
        result = data
        for transform in self.transforms:
            result = transform(result)
        return result

    def __repr__(self):
        body = ''.join('\n    {0}'.format(transform) for transform in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, body)
|
class BaseTransform():
    """
    Base transform class that other transforms should inherit from. Ensures that
    input to `__call__` is an OrderedDict (a single datum) or a list of samples,
    each sample a list of OrderedDict datums (in general usage these dicts include
    keys such as 'rgb', 'bounding_box_2d', etc. i.e. raw data and annotations).

    cf. `OntologyMapper` for an example
    """
    def __call__(self, data):
        """Validate the input structure and dispatch to `transform`.

        Raises
        ------
        TypeError
            If `data` is neither an OrderedDict nor a list of list of OrderedDict.
        """
        if isinstance(data, OrderedDict):
            return self.transform(data)
        if isinstance(data, list):
            # Structural check mirrors the original cheap probe (only the first
            # sample/datum is inspected), but empty lists are now accepted and
            # pass through instead of raising IndexError.
            first_ok = (not data) or (isinstance(data[0], list) and ((not data[0]) or isinstance(data[0][0], OrderedDict)))
            if first_ok:
                return self.transform(data)
        raise TypeError('`BaseTransform` expects input of type `OrderedDict` or list of list of `OrderedDict`.')

    def transform(self, data):
        """Apply the transformation to a datum or to a list of samples.

        Parameters
        ----------
        data: OrderedDict or list[list[OrderedDict]]
            dataset item as returned by `_SynchronizedDataset` or `_FrameDataset`.

        Returns
        -------
        OrderedDict or list[list[OrderedDict]]:
            Same type as input with transformations applied to dataset item.
        """
        if isinstance(data, OrderedDict):
            return self.transform_datum(data)
        elif isinstance(data, list):
            return [self.transform_sample(sample) for sample in data]

    def transform_datum(self, datum):
        # Subclasses implement per-datum transformation.
        raise NotImplementedError

    def transform_sample(self, sample):
        # Subclasses implement per-sample (multi-datum) transformation.
        raise NotImplementedError
|
class OntologyMapper(BaseTransform):
    '\n    Mapping ontology based on a lookup_table.\n    The remapped ontology will base on the remapped_ontology_table if provided.\n    Otherwise, the remapped ontology will be automatically constructed based on the order of lookup_table.\n\n    Parameters\n    ----------\n    original_ontology_table: dict[str->dgp.annotations.Ontology]\n        Ontology object *per annotation type*\n        The original ontology table.\n        {\n            "bounding_box_2d": BoundingBoxOntology[<ontology_sha>],\n            "autolabel_model_1/bounding_box_2d": BoundingBoxOntology[<ontology_sha>],\n            "semantic_segmentation_2d": SemanticSegmentationOntology[<ontology_sha>]\n            "bounding_box_3d": BoundingBoxOntology[<ontology_sha>],\n        }\n\n    lookup_table: dict[str->dict]\n        Lookup table *per annotation type* for each of the classes the user wants to remap.\n        Lookups are old class name to new class name\n\n        e.g.:\n        {\n            \'bounding_box_2d\': {\n                \'Car\': \'Car\',\n                \'Truck\': \'Car\',\n                \'Motorcycle\': \'Motorcycle\'\n            }\n            ...\n        }\n\n    remapped_ontology_table: dict[str->dgp.annotations.Ontology]\n        Ontology object *per annotation type*\n        If specified, the ontology will be remapped to the given remapped_ontology_table.\n        {\n            "bounding_box_2d": BoundingBoxOntology[<ontology_sha>],\n            "autolabel_model_1/bounding_box_2d": BoundingBoxOntology[<ontology_sha>],\n            "semantic_segmentation_2d": SemanticSegmentationOntology[<ontology_sha>]\n            "bounding_box_3d": BoundingBoxOntology[<ontology_sha>],\n        }\n    '
    SUPPORTED_ANNOTATION_TYPES = ('bounding_box_2d', 'semantic_segmentation_2d', 'bounding_box_3d', 'instance_segmentation_2d')
    def __init__(self, original_ontology_table, lookup_table, remapped_ontology_table=None):
        # Validate every requested annotation type before building anything.
        for annotation_key in lookup_table:
            if (annotation_key not in self.SUPPORTED_ANNOTATION_TYPES):
                raise ValueError(f'annotation_key {annotation_key} not supported for remapping yet, we accept PRs')
            if (annotation_key not in original_ontology_table):
                raise ValueError(f'annotation_key {annotation_key} needs to be present in `ontology_table`')
        self.lookup_table = lookup_table
        self.original_ontology_table = original_ontology_table
        self.remapped_ontology_table = {}
        for (annotation_key, lookup) in self.lookup_table.items():
            assert all([(class_name in original_ontology_table[annotation_key].class_names) for class_name in lookup.keys()]), 'All keys in `lookup` need to be valid class names in specified `ontology`'
            # Use the caller-supplied target ontology if present; otherwise derive one
            # from the lookup (ONTOLOGY_REGISTRY picks the right Ontology subclass).
            if ((remapped_ontology_table is not None) and (annotation_key in remapped_ontology_table)):
                self.remapped_ontology_table[annotation_key] = remapped_ontology_table[annotation_key]
            else:
                remapped_ontology_pb2 = construct_remapped_ontology(original_ontology_table[annotation_key], lookup, annotation_key)
                self.remapped_ontology_table[annotation_key] = ONTOLOGY_REGISTRY[annotation_key](remapped_ontology_pb2)
            assert all([(class_name in self.remapped_ontology_table[annotation_key].class_names) for class_name in lookup.values()]), 'All values in `lookup` need to be valid class names in specified `remapped_ontology`'
    def transform_datum(self, datum):
        "\n    Parameters\n    ----------\n    datum: OrderedDict\n        Dictionary containing raw data and annotations, with keys such as:\n        'rgb', 'intrinsics', 'bounding_box_2d'.\n        All annotation_keys in `self.lookup_table` (and `self.remapped_ontology_table`)\n        are expected to be contained\n\n    Returns\n    -------\n    datum: OrderedDict\n        Same dictionary but with annotations in `self.lookup_table` remapped to desired ontologies\n\n    Raises\n    ------\n    ValueError\n        Raised if the datum to remap does not contain all expected annotations.\n    "
        if (not all([(annotation_key in datum) for annotation_key in self.remapped_ontology_table])):
            raise ValueError('The data you are trying to remap does not have all annotations it expects')
        # Dispatch to the remap helper matching each annotation type.
        for (annotation_key, remapped_ontology) in self.remapped_ontology_table.items():
            lookup_table = self.lookup_table[annotation_key]
            original_ontology = datum[annotation_key].ontology
            if ((annotation_key == 'bounding_box_2d') or (annotation_key == 'bounding_box_3d')):
                datum[annotation_key] = remap_bounding_box_annotations(datum[annotation_key], lookup_table, original_ontology, remapped_ontology)
            elif (annotation_key == 'semantic_segmentation_2d'):
                datum[annotation_key] = remap_semantic_segmentation_2d_annotation(datum[annotation_key], lookup_table, original_ontology, remapped_ontology)
            elif (annotation_key == 'instance_segmentation_2d'):
                datum[annotation_key] = remap_instance_segmentation_2d_annotation(datum[annotation_key], lookup_table, original_ontology, remapped_ontology)
        return datum
|
class AddLidarCuboidPoints(BaseTransform):
    'Populate the num_points field for bounding_box_3d'
    def __init__(self, subsample: int=1) -> None:
        'Populate the num_points field for bounding_box_3d. Optionally downsamples the point cloud for speed.\n\n    Parameters\n    ----------\n    subsample: int, default: 1\n        Fraction of point cloud to use for computing the number of points. i.e., subsample=10 indicates that\n        1/10th of the points should be used.\n    '
        super().__init__()
        self.subsample = subsample
    def transform_datum(self, datum: Dict[(str, Any)]) -> Dict[(str, Any)]:
        'Populate the num_points field for bounding_box_3d\n    Parameters\n    ----------\n    datum: Dict[str,Any]\n        A dgp lidar or point cloud datum. Must contain keys bounding_box_3d and point_cloud\n\n    Returns\n    -------\n    datum: Dict[str,Any]\n        The datum with num_points added to the cuboids\n    '
        # No-op when there are no 3D boxes to annotate.
        if ('bounding_box_3d' not in datum):
            return datum
        boxes = datum['bounding_box_3d']
        if ((boxes is None) or (len(boxes) == 0)):
            return datum
        assert ('point_cloud' in datum), 'datum should contain point_cloud key'
        point_cloud = datum['point_cloud']
        if (self.subsample > 1):
            N = point_cloud.shape[0]
            # NOTE(review): np.random.choice samples WITH replacement here, so the
            # subsample may contain duplicates; the scaled count below is an estimate,
            # not an exact point count — confirm this is intended.
            sample_idx = np.random.choice(N, (N // self.subsample))
            point_cloud = point_cloud[sample_idx].copy()
        for box in boxes:
            # Only fill boxes whose count is unset (0); existing counts are preserved.
            if (box.num_points == 0):
                in_cuboid = points_in_cuboid(point_cloud, box)
                # Scale the subsampled count back up to approximate the full-cloud count.
                box._num_points = (np.sum(in_cuboid) * self.subsample)
        return datum
|
class InstanceMaskVisibilityFilter(BaseTransform):
'Given a multi-modal camera data, select instances whose instance masks appear big enough *at least in one camera*.\n\n For example, even when an object is mostly truncated in one camera, if it looks big enough in a neighboring\n camera in the multi-modal sample, it will be included in the annotations. In the transformed dataset item, all detection\n annotations (i.e. `bounding_box_3d`, `bounding_box_2d`, `instance_segmentation_2d\') contain a single set of instances.\n\n Parameters\n ----------\n camera_datum_names: list[str]\n Names of camera datums to be used in visibility computation.\n The definition of "visible" is that an instance has large mask at least in one of these cameras.\n\n min_mask_size: int, default: 300\n Minimum number of foreground pixels in instance mask for an instance to be added to annotations.\n\n use_amodal_bbox2d_annotations: bool, default: False\n If True, then use "amodal" bounding box (i.e. the box includes occluded/truncated parts) for 2D bounding box annotation.\n If False, then use "modal" bounding box (i.e. tight bounding box of instance mask.)\n '
    def __init__(self, camera_datum_names, min_mask_size=300, use_amodal_bbox2d_annotations=False):
        """See the class docstring for parameter semantics."""
        # Cameras considered when deciding whether an instance is "visible".
        self._camera_datum_names = camera_datum_names
        # Minimum foreground-pixel count for a mask to count as visible.
        self._min_mask_size = min_mask_size
        # True: keep amodal 2D boxes; False: use mask-tight (modal) boxes.
        self._use_amodal_bbox2d_annotations = use_amodal_bbox2d_annotations
def transform_sample(self, sample):
'Main entry point for filtering a multimodal sample using instance masks.\n\n Parameters\n ----------\n sample: list[OrderedDict]\n Multimodal sample as returned by `__getitem__()` of `_SynchronizedDataset`.\n\n Returns\n -------\n new_sample: list[OrderedDict]\n Multimodal sample with all detection annotations are filtered.\n\n Raises\n ------\n ValueError\n Raised if a 2D or 3D bounding box instance lacks any required instance IDs.\n '
cam_datums = [datum for datum in sample if (datum['datum_name'] in self._camera_datum_names)]
visible_instance_ids = set()
in_frustum_instance_ids_per_camera = {}
(id_to_bbox3d_per_camera, id_to_mask2d_per_camera, id_to_bbox2d_per_camera) = ({}, {}, {})
for datum in cam_datums:
datum_name = datum['datum_name']
in_frustum_instance_ids_per_camera[datum_name] = [mask.instance_id for mask in datum['instance_segmentation_2d']]
id_to_bbox3d = {bbox3d.instance_id: bbox3d for bbox3d in datum['bounding_box_3d']}
id_to_mask2d = {mask2d.instance_id: mask2d for mask2d in datum['instance_segmentation_2d']}
id_to_bbox3d_per_camera[datum_name] = id_to_bbox3d
id_to_mask2d_per_camera[datum_name] = id_to_mask2d
if self._use_amodal_bbox2d_annotations:
id_to_bbox2d = {bbox2d.instance_id: bbox2d for bbox2d in datum['bounding_box_2d']}
id_to_bbox2d_per_camera[datum_name] = id_to_bbox2d
if self._use_amodal_bbox2d_annotations:
in_frustum_instance_ids_per_camera[datum_name] = [_id for _id in in_frustum_instance_ids_per_camera[datum_name] if ((_id in id_to_bbox2d) and (_id in id_to_bbox3d))]
else:
in_frustum_instance_ids_per_camera[datum_name] = [_id for _id in in_frustum_instance_ids_per_camera[datum_name] if (_id in id_to_bbox3d)]
ids_missing_in_bbox3d = list((set(in_frustum_instance_ids_per_camera[datum_name]) - set(id_to_bbox3d)))
if ids_missing_in_bbox3d:
raise ValueError('Missing instances from `bounding_box_3d`: {:s}'.format(', '.join(sorted(ids_missing_in_bbox3d))))
if self._use_amodal_bbox2d_annotations:
ids_missing_in_bbox2d = list((set(in_frustum_instance_ids_per_camera[datum_name]) - set(id_to_bbox2d)))
if ids_missing_in_bbox2d:
raise ValueError('Missing instances from `bounding_box_2d`: {:s}'.format(', '.join(sorted(ids_missing_in_bbox2d))))
for instance_mask in datum['instance_segmentation_2d']:
if (instance_mask.area >= self._min_mask_size):
visible_instance_ids.add(instance_mask.instance_id)
new_sample = sample
for datum in new_sample:
datum_name = datum['datum_name']
if (datum_name not in self._camera_datum_names):
continue
(new_boxlist_3d, new_boxlist_2d, new_masklist_2d) = ([], [], [])
for instance_id in in_frustum_instance_ids_per_camera[datum_name]:
if (instance_id in visible_instance_ids):
new_boxlist_3d.append(id_to_bbox3d_per_camera[datum_name][instance_id])
mask2d = id_to_mask2d_per_camera[datum_name][instance_id]
if self._use_amodal_bbox2d_annotations:
bbox2d = id_to_bbox2d_per_camera[datum_name][instance_id]
else:
(yy, xx) = mask2d.bitmask.nonzero()
(y1, y2) = (np.min(yy), np.max(yy))
(x1, x2) = (np.min(xx), np.max(xx))
bbox2d = BoundingBox2D(box=np.float32([x1, y1, x2, y2]), class_id=mask2d.class_id, instance_id=mask2d.instance_id, attributes=mask2d.attributes, mode='ltrb')
new_boxlist_2d.append(bbox2d)
new_masklist_2d.append(mask2d)
datum['bounding_box_3d'] = BoundingBox3DAnnotationList(datum['bounding_box_3d'].ontology, new_boxlist_3d)
datum['bounding_box_2d'] = BoundingBox2DAnnotationList(datum['bounding_box_2d'].ontology, new_boxlist_2d)
datum['instance_segmentation_2d'] = PanopticSegmentation2DAnnotation.from_masklist(new_masklist_2d, datum['instance_segmentation_2d'].ontology, mask_shape=(datum['rgb'].height, datum['rgb'].width))
return new_sample
def transform_datum(self, datum):
'Main entry point for filtering a single-modal datum using instance masks.\n\n Parameters\n ----------\n datum: OrderedDict\n Single-modal datum as returned by `__getitem__()` of `_FrameDataset`.\n\n Returns\n -------\n new_datum: OrderedDict\n Single-modal sample with all detection annotations are filtered.\n '
return self.transform_sample([datum])[0]
|
class BoundingBox3DCoalescer(BaseTransform):
    """Coalesce 3D bounding box annotation from multiple datums and use it as an annotation of target datum.
    The bounding boxes are brought into the target datum frame.

    Parameters
    ----------
    src_datum_names: list[str]
        List of datum names used to create a list of coalesced bounding boxes.

    dst_datum_name: str
        Datum whose `bounding_box_3d` is replaced by the coelesced bounding boxes.

    drop_src_datums: bool, default: True
        If True, then remove the source datums in the transformed sample.
    """
    def __init__(self, src_datum_names, dst_datum_name, drop_src_datums=True):
        self._src_datum_names = src_datum_names
        self._dst_datum_name = dst_datum_name
        self._drop_src_datums = drop_src_datums

    def transform_sample(self, sample):
        """Main entry point for coalescing 3D bounding boxes.

        Parameters
        ----------
        sample: list[OrderedDict]
            Multimodal sample as returned by `__getitem__()` of `_SynchronizedDataset`.

        Returns
        -------
        new_sample: list[OrderedDict]
            Multimodal sample with updated 3D bounding box annotations.

        Raises
        ------
        ValueError
            Raised if there are multiple instances of the same kind of datum in a sample.
        """
        # Locate source datums and the (unique) destination datum by name.
        src_indices, dst_indices = [], []
        for position, datum in enumerate(sample):
            name = datum['datum_name']
            if name in self._src_datum_names:
                src_indices.append(position)
            elif name == self._dst_datum_name:
                dst_indices.append(position)
        if (len(dst_indices) != 1):
            raise ValueError('There must be one {:s} datum.'.format(self._dst_datum_name))
        dst_index = dst_indices[0]
        dst_datum = sample[dst_index]

        # Merge boxes from every source datum, transformed into the destination frame.
        # The first occurrence of each instance_id wins; later duplicates are skipped.
        merged_boxes, merged_instance_ids = [], []
        for position in src_indices:
            src_datum = sample[position]
            # Relative pose mapping source-frame boxes into the destination frame.
            p_src_dst = (dst_datum['pose'].inverse() * src_datum['pose'])
            for box in src_datum['bounding_box_3d']:
                if box.instance_id in merged_instance_ids:
                    continue
                merged_instance_ids.append(box.instance_id)
                merged_boxes.append((p_src_dst * box))
        # Replace the destination's annotation in place, reusing its ontology.
        dst_datum['bounding_box_3d'] = BoundingBox3DAnnotationList(dst_datum['bounding_box_3d'].ontology, merged_boxes)

        # Assemble the output sample, optionally dropping the source datums.
        new_sample = []
        for position, datum in enumerate(sample):
            if (position in src_indices) and self._drop_src_datums:
                continue
            new_sample.append(datum)
        return new_sample
|
@click.group()
@click.version_option()
def cli():
    """Top-level CLI group; raise the root logger to INFO verbosity."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
|
@cli.command(name='visualize-scene')
@add_options(options=VISUALIZE_OPTIONS)
@click.option('--scene-json', required=True, help='Path to Scene JSON')
def visualize_scene(scene_json, annotations, camera_datum_names, dataset_class, show_instance_id, max_num_items, video_fps, dst_dir, verbose, lidar_datum_names, render_pointcloud, radar_datum_names, render_radar_pointcloud, render_raw):
    """Parallelized visualizing of a scene.

    Example
    -------
    $ cli.py visualize-scene --scene-json tests/data/dgp/test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json --dst-dir /mnt/fsx -a bounding_box_3d -c camera_01
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)
    # Output artifacts are named after the scene directory containing the scene JSON.
    base_path = os.path.dirname(scene_json)
    if (dst_dir is not None):
        # Only the video file NAME is fixed here; the full path is joined per
        # annotation group ('2d'/'3d'/'raw') in the branches below.
        video_path = (os.path.basename(base_path) + '.avi')
        logging.info('Visualizing scene {} into {}'.format(os.path.basename(base_path), dst_dir))
    else:
        # No destination directory: the visualizers are invoked with
        # output_video_file=None (presumably display/no-file mode — TODO confirm).
        video_file = None
    # ParallelDomain scenes need their dedicated loader; anything else uses SynchronizedScene.
    scene_dataset_class = (ParallelDomainScene if (dataset_class == 'ParallelDomainScene') else SynchronizedScene)
    # ---- 2D annotations: rendered on camera datums only. ----
    annotations_2d = tuple([a for a in annotations if (ANNOTATION_TYPE_TO_ANNOTATION_GROUP[a] == '2d')])
    if annotations_2d:
        dataset = scene_dataset_class(scene_json, datum_names=camera_datum_names, requested_annotations=annotations_2d, only_annotated_datums=True)
        if len(dataset):
            if (dst_dir is not None):
                os.makedirs(os.path.join(dst_dir, '2d'), exist_ok=True)
                video_file = os.path.join(dst_dir, '2d', video_path)
            visualize_dataset_2d(dataset, camera_datum_names=camera_datum_names, caption_fn=partial(make_caption, prefix=base_path), output_video_file=video_file, output_video_fps=video_fps, max_num_items=max_num_items, show_instance_id=show_instance_id)
            # NOTE(review): this logs even when video_file is None (no --dst-dir given).
            logging.info('Visualizing 2D annotation visualizations to {}'.format(video_file))
        else:
            logging.info('Scene {} does not contain any of the requested datums {} annotated with {}. Skip 2d visualization.'.format(scene_json, camera_datum_names, annotations_2d))
    # ---- 3D annotations and optional lidar/radar pointcloud projections. ----
    annotations_3d = tuple([a for a in annotations if (ANNOTATION_TYPE_TO_ANNOTATION_GROUP[a] == '3d')])
    if (annotations_3d or render_pointcloud or render_radar_pointcloud):
        # 3D rendering consumes cameras plus lidar and radar datums.
        datum_names = ((list(camera_datum_names) + list(lidar_datum_names)) + list(radar_datum_names))
        # NOTE(review): the 3D path always uses SynchronizedScene, even when
        # dataset_class is 'ParallelDomainScene' — confirm this is intentional.
        dataset = SynchronizedScene(scene_json, datum_names=datum_names, requested_annotations=annotations_3d, only_annotated_datums=True)
        if len(dataset):
            if (dst_dir is not None):
                os.makedirs(os.path.join(dst_dir, '3d'), exist_ok=True)
                video_file = os.path.join(dst_dir, '3d', video_path)
            visualize_dataset_3d(dataset, camera_datum_names=camera_datum_names, lidar_datum_names=lidar_datum_names, caption_fn=partial(make_caption, prefix=base_path), output_video_file=video_file, output_video_fps=video_fps, render_pointcloud_on_images=render_pointcloud, max_num_items=max_num_items, show_instance_id_on_bev=show_instance_id, radar_datum_names=radar_datum_names, render_radar_pointcloud_on_images=render_radar_pointcloud)
            logging.info('Visualizing 3D annotation visualizations to {}'.format(video_file))
        else:
            logging.info('Scene {} does not contain any of the requested samples {} annotated with {}. Skip 3d visualization.'.format(scene_json, datum_names, annotations_3d))
    # ---- Raw (un-annotated) sensory data across all requested datums. ----
    if render_raw:
        datum_names = ((list(camera_datum_names) + list(lidar_datum_names)) + list(radar_datum_names))
        # only_annotated_datums=False: include every sample, annotated or not.
        dataset = SynchronizedScene(scene_json, datum_names=datum_names, only_annotated_datums=False)
        if len(dataset):
            if (dst_dir is not None):
                os.makedirs(os.path.join(dst_dir, 'raw'), exist_ok=True)
                video_file = os.path.join(dst_dir, 'raw', video_path)
            visualize_dataset_3d(dataset, camera_datum_names=camera_datum_names, lidar_datum_names=lidar_datum_names, caption_fn=partial(make_caption, prefix=base_path), output_video_file=video_file, output_video_fps=video_fps, render_pointcloud_on_images=render_pointcloud, max_num_items=max_num_items, show_instance_id_on_bev=False, radar_datum_names=radar_datum_names, render_radar_pointcloud_on_images=render_radar_pointcloud)
            logging.info('Visualizing raw sensory data visualizations to {}'.format(video_file))
        else:
            logging.info('Scene {} does not contain any of the requested samples {}. Skip visualization.'.format(scene_json, datum_names))
|
@cli.command(name='visualize-scenes')
@click.option('--scene-dataset-json', required=True, help='Path to SceneDataset JSON')
@click.option('--split', type=click.Choice(['train', 'val', 'test', 'train_overfit']), required=True, help='Dataset split to be fetched.')
@add_options(options=VISUALIZE_OPTIONS)
def visualize_scenes(scene_dataset_json, split, annotations, camera_datum_names, dataset_class, show_instance_id, max_num_items, video_fps, dst_dir, verbose, lidar_datum_names, render_pointcloud, radar_datum_names, render_radar_pointcloud, render_raw):
    """Parallelized visualizing of scene dataset.

    Example
    -------
    $ cli.py visualize-scenes --scene-dataset-json tests/data/dgp/test_scene/scene_dataset_v1.0.json --dst-dir /mnt/fsx -a bounding_box_3d --split train -c camera_01
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)
    # Load only the lightweight SceneDataset protobuf here; each scene is
    # loaded and rendered one at a time by visualize_scene below.
    dataset = open_pbobject(scene_dataset_json, pb_class=SceneDataset)
    if (dst_dir is not None):
        # Mirror the dataset JSON's basename (extension stripped) as the output subdirectory.
        dataset_directory = os.path.join(dst_dir, os.path.basename(scene_dataset_json).split('.')[0])
        os.makedirs(dataset_directory, exist_ok=True)
        logging.info('Visualizing dataset into {}'.format(dataset_directory))
    else:
        dataset_directory = None
    scene_jsons = dataset.scene_splits[DATASET_SPLIT_NAME_TO_KEY[split]].filenames
    for scene_json in scene_jsons:
        # Scene filenames in the split are relative to the dataset JSON's directory.
        scene_json = os.path.join(os.path.dirname(scene_dataset_json), scene_json)
        # Call the sibling command's underlying function directly, bypassing click parsing.
        visualize_scene.callback(scene_json, annotations=annotations, camera_datum_names=camera_datum_names, dataset_class=dataset_class, show_instance_id=show_instance_id, max_num_items=max_num_items, video_fps=video_fps, dst_dir=dataset_directory, verbose=verbose, lidar_datum_names=lidar_datum_names, render_pointcloud=render_pointcloud, radar_datum_names=radar_datum_names, render_radar_pointcloud=render_radar_pointcloud, render_raw=render_raw)
|
class AddLidarCuboidPointsContext(AddLidarCuboidPoints):
    """Add Lidar Points but applied to samples not datums."""
    def __call__(self, sample: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Apply the parent per-datum transform to every point-cloud datum in `sample`."""
        transformed = []
        for datum in sample:
            is_point_cloud = (datum['datum_type'] == 'point_cloud')
            # Only point-cloud datums that carry a non-None bounding_box_3d are processed;
            # everything else passes through untouched.
            if is_point_cloud and (datum.get('bounding_box_3d') is not None):
                datum = super().__call__(datum)
            transformed.append(datum)
        return transformed
|
class ScaleImages(ScaleAffineTransform):
    """Scale Transform but applied to samples not datums."""
    def _scale_if_image(self, datum: Dict[str, Any]) -> Dict[str, Any]:
        # Scale only image datums that carry an 'rgb' payload; others pass through unchanged.
        if (datum['datum_type'] == 'image') and ('rgb' in datum):
            return super().__call__(datum)
        return datum

    def __call__(self, sample: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Apply the parent per-datum scaling to every image datum in `sample`."""
        return [self._scale_if_image(datum) for datum in sample]
|
@click.group()
@click.version_option()
def cli():
    """Ingestion CLI group; keep ingest logs at INFO, silence noisy third-party loggers."""
    # NOTE(review): the logger name 'dgp2widker' looks like a typo of 'dgp2wicker' —
    # confirm against the module that emits these logs before changing it.
    logging.getLogger('dgp2widker').setLevel(level=logging.INFO)
    for noisy_logger in ('py4j', 'botocore', 'boto3', 'PIL'):
        logging.getLogger(noisy_logger).setLevel(logging.CRITICAL)
|
@cli.command(name='ingest')
@click.option('--scene-dataset-json', required=True, help='Path to DGP Dataset JSON')
@click.option('--wicker-dataset-name', required=True, default=None, help='Name of dataset in Wicker')
@click.option('--wicker-dataset-version', required=True, help='Version of dataset in Wicker')
@click.option('--datum-names', required=True, help='List of datum names')
@click.option('--requested-annotations', help='List of annotation types')
@click.option('--only-annotated-datums', is_flag=True, help='Apply only annotated datums')
# BUGFIX: these numeric options had default=None and no explicit type, so click
# delivered them as *strings* when supplied on the command line; declare type=int.
@click.option('--max-num-scenes', required=False, default=None, type=int, help='The maximum number of scenes to process')
@click.option('--max-len', required=False, default=1000, help='The maximum number of samples per scene')
@click.option('--chunk-size', required=False, default=1000, help='The number of samples per chunk')
@click.option('--skip-camera-cuboids', is_flag=True, help='If True, skip cuboids for non lidar datums')
@click.option('--num-partitions', required=False, default=None, type=int, help='Number of scene partitions')
@click.option('--num-repartitions', required=False, default=None, type=int, help='Number of sample partitions')
@click.option('--is-pd', is_flag=True, help='If true, process the dataset with ParallelDomainScene')
@click.option('--data-uri', required=False, default=None, help='Alternate location for scene data')
@click.option('--add-lidar-points', is_flag=True, help='Add lidar point count to lidar cuboids')
@click.option('--half-size-images', is_flag=True, help='Resize image datums to half size')
# BUGFIX: typo in help text ('locaiton' -> 'location').
@click.option('--alternate-scene-uri', required=False, default=None, help='Alternate scene location to sync')
def ingest(scene_dataset_json, wicker_dataset_name, wicker_dataset_version, datum_names, requested_annotations, only_annotated_datums, max_num_scenes, max_len, chunk_size, skip_camera_cuboids, num_partitions, num_repartitions, is_pd, data_uri, add_lidar_points, half_size_images, alternate_scene_uri):
    """Ingest a DGP scene dataset into Wicker.

    Parses the comma-separated datum/annotation option strings, builds the
    optional sample-transform pipeline, and delegates the actual ingestion to
    `ingest_dgp_to_wicker`. Prints the ingestion results on completion.
    """
    # Comma-separated CLI strings -> stripped lists.
    datum_names = [x.strip() for x in datum_names.split(',')]
    requested_annotations = ([x.strip() for x in requested_annotations.split(',')] if requested_annotations else None)
    dataset_kwargs = {'datum_names': datum_names, 'requested_annotations': requested_annotations, 'only_annotated_datums': only_annotated_datums}
    # Optional pre-ingestion transforms, applied to every multimodal sample.
    pipeline = []
    if add_lidar_points:
        pipeline.append(AddLidarCuboidPointsContext())
    if half_size_images:
        pipeline.append(ScaleImages(s=0.5))
    results = ingest_dgp_to_wicker(scene_dataset_json=scene_dataset_json, wicker_dataset_name=wicker_dataset_name, wicker_dataset_version=wicker_dataset_version, dataset_kwargs=dataset_kwargs, spark_context=None, pipeline=pipeline, max_num_scenes=max_num_scenes, max_len=max_len, chunk_size=chunk_size, skip_camera_cuboids=skip_camera_cuboids, num_partitions=num_partitions, num_repartitions=num_repartitions, is_pd=is_pd, data_uri=data_uri, alternate_scene_uri=alternate_scene_uri)
    print('Finished ingest!')
    print(results)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.