code
stringlengths
17
6.64M
def get_split_file(mode: str) -> Path:
    """Return the path to the split file listing items for the given `mode`."""
    return PATHS['syns_patches'] / 'splits' / f'{mode}_files.txt'
def get_scenes() -> list[Path]:
    """Return the sorted scene directories, excluding the `splits` folder."""
    root = PATHS['syns_patches']
    scenes = [p for p in root.iterdir() if p.is_dir() and p.stem != 'splits']
    return sorted(scenes)
def get_scene_files(scene_dir: Path) -> dict[str, ty.S[Path]]:
    """Map each known subdirectory name to its sorted file list for one scene.

    Subdirectories missing from `scene_dir` are omitted from the result.
    """
    out = {}
    for key in SUBDIRS:
        subdir = scene_dir / key
        if subdir.is_dir():
            out[key] = sorted(subdir.iterdir())
    return out
def get_info_file(scene: str) -> Path:
    """Return the first `.txt` info file found inside the given scene directory.

    Raises StopIteration if the scene directory contains no `.txt` file.
    """
    scene_dir = PATHS['syns_patches'] / scene
    return next(f for f in scene_dir.iterdir() if f.suffix == '.txt')
def get_image_file(scene: str, file: str) -> Path:
    """Build the path to an image file from a scene name and item filename."""
    return PATHS['syns_patches'] / scene / 'images' / file
def get_depth_file(scene: str, file: str) -> Path:
    """Build the path to a depth file (`.npy`) from a scene name and item filename."""
    depth_file = PATHS['syns_patches'] / scene / 'depths' / file
    return depth_file.with_suffix('.npy')
def get_edges_file(scene: str, subdir: str, file: str) -> Path:
    """Build the path to an edges file, validating `subdir` is a known edges directory."""
    assert ('edges' in subdir), f'Must provide an "edges" directory. ({subdir})'
    assert (subdir in SUBDIRS), f"Non-existent edges directory. ({subdir} vs. {[s for s in SUBDIRS if ('edges' in s)]})"
    return PATHS['syns_patches'] / scene / subdir / file
def load_info(scene: str) -> ty.S[str]:
    """Read the scene information file as a list of lines (latin-1 encoded)."""
    return io.readlines(get_info_file(scene), encoding='latin-1')
def load_category(scene: str) -> tuple[str, str]:
    """Return the (category, subcategory) pair parsed from the scene info file."""
    info = load_info(scene)
    category = info[1].replace('Scene Category: ', '')
    # Some info files separate the pair with ': ', others with ' - '.
    try:
        cat, subcat = category.split(': ')
    except ValueError:
        cat, subcat = category.split(' - ')
    return cat, subcat
def load_split(mode) -> tuple[Path, ty.S[Item]]:
    """Load the list of scenes and filenames that make up the given split.

    Split files contain one "SEQ ITEM" pair per line:
    ```
    01 00.png
    10 11.png
    ```
    """
    file = get_split_file(mode)
    items = io.tmap(Item, io.readlines(file, split=True), star=True)
    return file, items
def load_intrinsics() -> ty.A:
    """Computes the virtual camera intrinsics for the `Kitti` based SYNS Patches.
    We compute this based on the desired FOV, using basic trigonometry.

    :return: (ndarray) (3, 3) Camera intrinsic parameters.
    """
    # NOTE(review): docstring previously claimed a (4, 4) matrix; the code below
    # clearly builds and returns a 3x3 pinhole intrinsics matrix K.
    (Fy, Fx) = KITTI_FOV
    (h, w) = KITTI_SHAPE
    # Principal point at the image centre.
    (cx, cy) = ((w // 2), (h // 2))
    # Focal length from FOV: f = c / tan(FOV / 2).
    fx = (cx / np.tan((np.deg2rad(Fx) / 2)))
    fy = (cy / np.tan((np.deg2rad(Fy) / 2)))
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32)
    return K
@dataclass
class Item:
    """A single TUM-RGBD sample: sequence name plus RGB/depth filenames."""

    seq: str         # sequence directory name
    rgb_stem: str    # RGB image filename within the sequence
    depth_stem: str  # Kinect depth filename within the sequence

    @classmethod
    def get_split_file(cls, mode: str) -> Path:
        """Get path to dataset split. {test}"""
        return PATHS['tum'] / 'splits' / f'{mode}_files.txt'

    @classmethod
    def load_split(cls, mode: str) -> ty.S['Item']:
        """Load dataset split as a list of `Item`s. {test}"""
        lines = io.readlines(cls.get_split_file(mode), split=True)
        return [cls(*fields) for fields in lines]

    def get_img_file(self) -> Path:
        """Get path to image file."""
        return PATHS['tum'] / self.seq / self.rgb_stem

    def get_depth_file(self) -> Path:
        """Get path to Kinect depth file."""
        return PATHS['tum'] / self.seq / self.depth_stem

    def load_img(self) -> Image:
        """Open and return the RGB image."""
        return Image.open(self.get_img_file())

    def load_depth(self) -> ty.A:
        """Load the Kinect depth map (stored value / 5000) with a trailing channel axis."""
        depth = np.array(Image.open(self.get_depth_file()), dtype=np.float32) / 5000
        return depth[..., None]
def create_splits(th: float=0.02, max: int=2500, seed: int=42) -> None:
    """Create a split of associated images & depth maps.

    :param th: (float) Maximum time difference between two images to be considered as associated.
    :param max: (int) Maximum number of images in split.
    :param seed: (int) Random seed.
    :return
    """
    out_file = PATHS['tum'] / 'splits' / 'test_files.txt'
    io.mkdirs(out_file.parent)

    items = []
    for seq in io.get_dirs(PATHS['tum'], key=lambda f: f.stem != 'splits'):
        imgs = read_file_list(seq / 'rgb.txt')
        depths = read_file_list(seq / 'depth.txt')
        matches = associate(imgs, depths, offset=0, max_difference=th)
        if 'freiburg2' in seq.stem:
            # Keep every 3rd match for freiburg2 sequences.
            # NOTE(review): rationale not documented here — presumably to subsample
            # denser sequences; confirm against the dataset description.
            matches = matches[::3]
        for a, b in matches:
            items.append(f'{seq.stem} {imgs[a][0]} {depths[b][0]}\n')

    # Deterministically subsample, then restore sorted order for readability.
    random.seed(seed)
    random.shuffle(items)
    items = sorted(items[:max])
    with open(out_file, 'w') as f:
        f.writelines(items)
def read_file_list(filename):
    """Reads a trajectory from a text file. From: https://cvg.cit.tum.de/data/datasets/rgbd-dataset/tools

    File format:
    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
    and "d1 d2 d3.." is arbitrary data (e.g., a 3D position and 3D orientation) associated to this timestamp.

    Input:
    filename -- File name

    Output:
    dict -- dictionary of (stamp,data) tuples
    """
    with open(filename) as f:
        data = f.read()
    # Normalise separators (commas/tabs -> spaces) and split into lines.
    lines = data.replace(',', ' ').replace('\t', ' ').split('\n')
    # Tokenise non-empty, non-comment lines.
    rows = [
        [v.strip() for v in line.split(' ') if v.strip() != '']
        for line in lines
        if len(line) > 0 and line[0] != '#'
    ]
    # Keep rows with a timestamp plus at least one data field.
    # NOTE: local renamed from `list`, which shadowed the builtin.
    entries = [(float(row[0]), row[1:]) for row in rows if len(row) > 1]
    return dict(entries)
def associate(first_list, second_list, offset, max_difference):
    """Associate image and depth pairs. From: https://cvg.cit.tum.de/data/datasets/rgbd-dataset/tools

    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
    to find the closest match for every input tuple.

    Input:
    first_list -- first dictionary of (stamp,data) tuples
    second_list -- second dictionary of (stamp,data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
    """
    # Sets give O(1) membership/removal in the greedy loop below; the original
    # used lists, making the loop O(n^2). Output is unchanged: ordering comes
    # solely from sorting `potential_matches`, never from key iteration order.
    first_keys = set(first_list.keys())
    second_keys = set(second_list.keys())
    potential_matches = [
        (abs(a - (b + offset)), a, b)
        for a in first_keys
        for b in second_keys
        if abs(a - (b + offset)) < max_difference
    ]
    # Greedy matching: smallest time difference first, each stamp used once.
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches
class Database():
    # Lazily-created state; populated on first access and cached thereafter.
    _database = None
    _protocol = None
    _length = None

    def __init__(self, path: PathLike, readahead: bool=True, pre_open: bool=False):
        """Base class for LMDB-backed _databases.

        :param path: (PathLike) Path to the database.
        :param readahead: (bool) If `True`, enables the filesystem readahead mechanism.
        :param pre_open: (bool) If `True`, the first iterations will be faster, but it will raise error when doing multi-gpu training.
            If `False`, the database will open when you will retrieve the first item.
        """
        self.path = str(path)
        self.readahead = readahead
        self.pre_open = pre_open
        # Tracks whether any real item fetch happened; metadata reads
        # (protocol/keys) close the env again until the first item fetch.
        self._has_fetched_an_item = False

    @property
    def database(self):
        # Open the LMDB environment lazily (read-only, no locking).
        if (self._database is None):
            self._database = lmdb.open(path=self.path, readonly=True, readahead=self.readahead, max_spare_txns=256, lock=False)
        return self._database

    @database.deleter
    def database(self):
        # Close and forget the environment; it re-opens on next access.
        if (self._database is not None):
            self._database.close()
            self._database = None

    @property
    def protocol(self):
        """Read the pickle protocol contained in the database.

        :return: The pickle protocol used to serialize keys/values.
        """
        # The 'protocol' entry itself is keyed with a plain ASCII key, since
        # the protocol is not known before reading it.
        if (self._protocol is None):
            self._protocol = self._get(item='protocol', convert_key=(lambda key: key.encode('ascii')), convert_value=(lambda value: pickle.loads(value)))
        return self._protocol

    @property
    def keys(self):
        """Read the keys contained in the database.

        :return: The set of available keys.
        """
        protocol = self.protocol
        keys = self._get(item='keys', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value)))
        return keys

    def __len__(self):
        """Returns the number of keys available in the database.

        :return: The number of keys.
        """
        if (self._length is None):
            self._length = len(self.keys)
        return self._length

    def __getitem__(self, item):
        """Retrieves an item or a list of items from the database.

        :param item: A key or a list of keys.
        :return: A value or a list of values.
        """
        self._has_fetched_an_item = True
        if (not isinstance(item, list)):
            item = self._get(item, self._convert_key, self._convert_value)
        else:
            item = self._gets(item, self._convert_keys, self._convert_values)
        return item

    def __contains__(self, item):
        'Check if a given key is in the database.'
        return (item in self.keys)

    def index(self, index):
        """Retrieves an item or a list of items from the database from an integer index.

        :param index: An index or a list of indexes.
        :return: A value or a list of values.
        """
        key = self.keys[index]
        return (key, self[key])

    def _get(self, item, convert_key, convert_value):
        """Instantiates a transaction and its associated cursor to fetch an item.

        :param item: A key.
        :param convert_key:
        :param convert_value:
        :return:
        """
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                item = self._fetch(cursor, item, convert_key, convert_value)
        # Possibly close the env again (see _keep_database).
        self._keep_database()
        return item

    def _gets(self, items, convert_keys, convert_values):
        """Instantiates a transaction and its associated cursor to fetch a list of items.

        :param items: A list of keys.
        :param convert_keys:
        :param convert_values:
        :return:
        """
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                items = self._fetchs(cursor, items, convert_keys, convert_values)
        self._keep_database()
        return items

    def _fetch(self, cursor, key, convert_key, convert_value):
        """Retrieve a value given a key.

        :param cursor:
        :param key: A key.
        :param convert_key:
        :param convert_value:
        :return: A value.
        """
        key = convert_key(key=key)
        value = cursor.get(key=key)
        value = convert_value(value=value)
        return value

    def _fetchs(self, cursor, keys, convert_keys, convert_values):
        """Retrieve a list of values given a list of keys.

        :param cursor:
        :param keys: A list of keys.
        :param convert_keys:
        :param convert_values:
        :return: A list of values.
        """
        keys = convert_keys(keys=keys)
        (_, values) = list(zip(*cursor.getmulti(keys)))
        values = convert_values(values=values)
        return values

    def _convert_key(self, key):
        """Converts a key into a byte key.

        :param key: A key.
        :return: A byte key.
        """
        return pickle.dumps(key, protocol=self.protocol)

    def _convert_keys(self, keys):
        """Converts keys into byte keys.

        :param keys: A list of keys.
        :return: A list of byte keys.
        """
        return [self._convert_key(key=key) for key in keys]

    def _convert_value(self, value):
        """Converts a byte value back into a value.

        :param value: A byte value.
        :return: A value
        """
        return pickle.loads(value)

    def _convert_values(self, values):
        """Converts bytes values back into values.

        :param values: A list of byte values.
        :return: A list of values.
        """
        return [self._convert_value(value=value) for value in values]

    def _keep_database(self):
        'Checks if the database must be deleted.'
        # Until the first real item fetch, keep the env closed between metadata
        # reads unless `pre_open` requested keeping it open from the start.
        if ((not self.pre_open) and (not self._has_fetched_an_item)):
            del self.database

    def __iter__(self):
        'Provides an iterator over the keys when iterating over the database.'
        return iter(self.keys)

    def __del__(self):
        'Closes the database properly.'
        del self.database
class ImageDatabase(Database):
    """Database whose values are decoded as PIL images."""

    def _convert_value(self, value):
        """Converts a byte image back into a PIL Image.

        :param value: A byte image.
        :return: A PIL Image image.
        """
        buffer = io.BytesIO(value)
        return Image.open(buffer)
class MaskDatabase(ImageDatabase):
    """Image database whose values are decoded as binary (mode '1') masks."""

    def _convert_value(self, value):
        """Converts a byte image back into a binary PIL Image.

        :param value: A byte image.
        :return: A PIL image in mode '1'.
        """
        image = Image.open(io.BytesIO(value))
        return image.convert('1')
class LabelDatabase(Database):
    """Database of plain pickled labels; inherits all behaviour from `Database`."""
class ArrayDatabase(Database):
    """Database of fixed-shape numpy arrays stored as raw bytes."""

    _dtype = None  # cached dtype read from the database
    _shape = None  # cached per-item shape read from the database

    @property
    def dtype(self):
        """dtype shared by every array in the database (read once, then cached)."""
        if self._dtype is None:
            protocol = self.protocol
            self._dtype = self._get(
                item='dtype',
                convert_key=lambda key: pickle.dumps(key, protocol=protocol),
                convert_value=lambda value: pickle.loads(value),
            )
        return self._dtype

    @property
    def shape(self):
        """Shape shared by every array in the database (read once, then cached)."""
        if self._shape is None:
            protocol = self.protocol
            self._shape = self._get(
                item='shape',
                convert_key=lambda key: pickle.dumps(key, protocol=protocol),
                convert_value=lambda value: pickle.loads(value),
            )
        return self._shape

    def _convert_value(self, value):
        # Interpret the raw bytes as one array of the stored dtype/shape.
        return np.frombuffer(value, dtype=self.dtype).reshape(self.shape)

    def _convert_values(self, values):
        # Concatenate raw buffers and reshape to (batch, *shape).
        batch = len(values)
        return np.frombuffer(b''.join(values), dtype=self.dtype).reshape((batch,) + self.shape)
class TensorDatabase(ArrayDatabase):
    """Array database whose values are returned as torch tensors."""

    def _convert_value(self, value):
        # Zero-copy wrap of the parent's numpy array.
        return torch.from_numpy(super()._convert_value(value))

    def _convert_values(self, values):
        return torch.from_numpy(super()._convert_values(values))
def write_image_database(d: dict, database: Path):
    """Write a {key: image-file-path} mapping into an LMDB image database.

    Stores the pickle protocol, the sorted key list, then each image's raw bytes.
    Any existing database at `database` is removed first.
    """
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)

    # NOTE(review): unlike `write_label_database`, this writes in place rather
    # than building in a temporary directory first.
    tmp_database = database
    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40), writemap=True) as env:
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        # One transaction per image: raw encoded bytes, keyed by pickled key.
        for key, value in tqdm(sorted(d.items())):
            with env.begin(write=True) as txn:
                with value.open('rb') as file:
                    txn.put(key=pickle.dumps(key), value=file.read(), dupdata=False)
def write_label_database(d: dict, database: Path):
    """Write a {key: label} mapping into an LMDB database of pickled labels.

    The database is built in a temporary directory and moved into place once
    complete, so a partially-written database never appears at `database`.
    """
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)

    tmp_dir = Path('/tmp') / f'TEMP_{time()}'
    tmp_dir.mkdir(parents=True)
    tmp_database = tmp_dir / f'{database.name}'

    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env:
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)
        # All labels written inside a single transaction.
        with env.begin(write=True) as txn:
            for key, value in tqdm(sorted(d.items())):
                txn.put(key=pickle.dumps(key), value=pickle.dumps(value), dupdata=False)

    shutil.move(f'{tmp_database}', database)
    shutil.rmtree(tmp_dir)
def write_array_database(d: dict, database: Path):
    """Write a {key: array} mapping into an LMDB array database.

    Alongside the values, stores the pickle protocol, the sorted key list, and
    a shared `shape`/`dtype` taken from an arbitrary value in `d`.
    """
    database.parent.mkdir(parents=True, exist_ok=True)
    if database.exists():
        shutil.rmtree(database)

    tmp_database = database
    with lmdb.open(path=f'{tmp_database}', map_size=(2 ** 40)) as env:
        with env.begin(write=True) as txn:
            txn.put(key='protocol'.encode('ascii'), value=pickle.dumps(pickle.DEFAULT_PROTOCOL), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('keys'), value=pickle.dumps(sorted(d.keys())), dupdata=False)

        # Shape/dtype are assumed shared across all values — taken from one sample.
        sample = next(iter(d.values()))
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('shape'), value=pickle.dumps(sample.shape), dupdata=False)
        with env.begin(write=True) as txn:
            txn.put(key=pickle.dumps('dtype'), value=pickle.dumps(sample.dtype), dupdata=False)

        # NOTE(review): values are stored pickled here, while `ArrayDatabase`
        # reads them with `np.frombuffer` on the raw bytes — confirm these two
        # are meant to pair, or whether `value.tobytes()` was intended.
        with env.begin(write=True) as txn:
            for key, value in tqdm(sorted(d.items())):
                txn.put(key=pickle.dumps(key), value=pickle.dumps(value), dupdata=False)
class AgentSnapshot2DList(AgentSnapshotList):
    """Container for 2D agent list.

    Parameters
    ----------
    ontology: BoundingBoxOntology
        Ontology for 2D bounding box tasks.

    TODO: Add support for BoundingBox2DAnnotationList.
    boxlist: list[BoundingBox2D]
        List of BoundingBox2D objects. See `utils/structures/bounding_box_2d`
        for more details.
    """
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load AgentSnapshot2DList with wrong type of ontology!'
        for box in boxlist:
            assert isinstance(box, BoundingBox2D), f'Can only instantiate an agent snapshot list from a list of BoundingBox2D, not {type(box)}'
        self.boxlist = boxlist

    @classmethod
    def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
        """Loads agent snapshot list from proto into a canonical format for consumption in __getitem__ function in
        BaseDataset.
        Format/data structure for agent types will vary based on task.

        Parameters
        ----------
        agent_snapshots_pb2: dgp.proto.agent.AgentsSlice.agent_snapshots or dgp.proto.agent.AgentTrack.agent_snapshots
            A proto message holding list of agent snapshot.

        ontology: Ontology
            Ontology for given agent.

        feature_ontology_table: dict, optional
            A dictionary mapping feature type key(s) to Ontology(s), i.e.:
            {
                "agent_2d": AgentFeatureOntology[<ontology_sha>],
                "agent_3d": AgentFeatureOntology[<ontology_sha>]
            }
            Default: None.

        Returns
        -------
        AgentSnapshot2DList
            Agent Snapshot list object instantiated from proto object.
        """
        boxlist = []
        for agent_snapshot_2d in agent_snapshots_pb2:
            snapshot = agent_snapshot_2d.agent_snapshot_2D
            feature_ontology = feature_ontology_table[FEATURE_TYPE_ID_TO_KEY[snapshot.feature_type]]
            boxlist.append(
                BoundingBox2D(
                    box=np.float32([snapshot.box.x, snapshot.box.y, snapshot.box.w, snapshot.box.h]),
                    # BUGFIX: previously accessed `agent_snapshot_2d.agent_snapshots_2D.class_id`
                    # (plural `snapshots`), inconsistent with every other field access —
                    # that attribute does not exist on the message.
                    class_id=ontology.class_id_to_contiguous_id[snapshot.class_id],
                    instance_id=snapshot.instance_id,
                    color=ontology.colormap[snapshot.class_id],
                    # Feature values are positional; map index -> feature name.
                    attributes=dict(
                        (feature_ontology.id_to_name[feature_id], feature)
                        for feature_id, feature in enumerate(snapshot.features)
                    ),
                )
            )
        return cls(ontology, boxlist)

    def __len__(self):
        return len(self.boxlist)

    def __getitem__(self, index):
        'Return a single 2D bounding box'
        return self.boxlist[index]

    def render(self):
        'TODO: Batch rendering function for bounding boxes.'

    @property
    def class_ids(self):
        """Return class ID for each box, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.int64([box.class_id for box in self.boxlist])

    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]

    @property
    def instance_ids(self):
        return np.int64([box.instance_id for box in self.boxlist])
class AgentSnapshot3DList(AgentSnapshotList):
    """Container for 3D agent list.

    Parameters
    ----------
    ontology: BoundingBoxOntology
        Ontology for 3D bounding box tasks.

    boxlist: list[BoundingBox3D]
        List of BoundingBox3D objects. See `utils/structures/bounding_box_3d`
        for more details.
    """
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load AgentSnapshot3DList with wrong type of ontology!'
        # Validate every element up front so failures point at the offending entry.
        for box in boxlist:
            assert isinstance(box, BoundingBox3D), f'Can only instantiate an agent snapshot list from a list of BoundingBox3D, not {type(box)}'
        self.boxlist = boxlist

    @classmethod
    def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
        """Loads agent snapshot list from proto into a canonical format for consumption in __getitem__ function in
        BaseDataset.
        Format/data structure for agent types will vary based on task.

        Parameters
        ----------
        agent_snapshots_pb2: dgp.proto.agent.AgentsSlice.agent_snapshots or dgp.proto.agent.AgentTrack.agent_snapshots
            A proto message holding list of agent snapshot.

        ontology: Ontology
            Ontology for given agent.

        feature_ontology_table: dict
            A dictionary mapping feature type key(s) to Ontology(s), i.e.:
            {
                "agent_2d": AgentFeatureOntology[<ontology_sha>],
                "agent_3d": AgentFeatureOntology[<ontology_sha>]
            }

        Returns
        -------
        AgentSnapshot3DList
            Agent Snapshot list object instantiated from proto object.
        """
        boxlist = []
        for agent_snapshot_3d in agent_snapshots_pb2:
            # Resolve the feature ontology for this snapshot's feature type.
            feature_type = agent_snapshot_3d.agent_snapshot_3D.feature_type
            feature_ontology = feature_ontology_table[FEATURE_TYPE_ID_TO_KEY[feature_type]]
            boxlist.append(
                BoundingBox3D(
                    pose=Pose.load(agent_snapshot_3d.agent_snapshot_3D.box.pose),
                    # Box extents are packed (width, length, height).
                    sizes=np.float32([
                        agent_snapshot_3d.agent_snapshot_3D.box.width,
                        agent_snapshot_3d.agent_snapshot_3D.box.length,
                        agent_snapshot_3d.agent_snapshot_3D.box.height
                    ]),
                    # Remap dataset class IDs onto the contiguous ID space.
                    class_id=ontology.class_id_to_contiguous_id[agent_snapshot_3d.agent_snapshot_3D.class_id],
                    instance_id=agent_snapshot_3d.agent_snapshot_3D.instance_id,
                    sample_idx=agent_snapshot_3d.slice_id.index,
                    color=ontology.colormap[agent_snapshot_3d.agent_snapshot_3D.class_id],
                    # Feature values are positional; map index -> feature name.
                    attributes=dict([
                        (feature_ontology.id_to_name[feature_id], feature)
                        for (feature_id, feature) in enumerate(agent_snapshot_3d.agent_snapshot_3D.features)
                    ])
                )
            )
        return cls(ontology, boxlist)

    def __len__(self):
        return len(self.boxlist)

    def __getitem__(self, index):
        'Return a single 3D bounding box'
        return self.boxlist[index]

    def render(self, image, camera, line_thickness=2, font_scale=0.5):
        """Render the 3D boxes in this agents on the image in place

        Parameters
        ----------
        image: np.ndarray
            Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format.
            Data type is uint8.

        camera: dgp.utils.camera.Camera
            Camera used to render the bounding box.

        line_thickness: int, optional
            Thickness of bounding box lines. Default: 2.

        font_scale: float, optional
            Font scale used in text labels. Default: 0.5.

        Raises
        ------
        ValueError
            Raised if `image` is not a 3-channel uint8 numpy array.
        TypeError
            Raised if `camera` is not an instance of Camera.
        """
        if ((not isinstance(image, np.ndarray)) or (image.dtype != np.uint8) or (len(image.shape) != 3) or (image.shape[2] != 3)):
            raise ValueError('`image` needs to be a 3-channel uint8 numpy array')
        if (not isinstance(camera, Camera)):
            raise TypeError('`camera` should be of type Camera')
        # Each box renders itself; class name comes from the contiguous-ID ontology.
        for box in self.boxlist:
            box.render(image, camera, line_thickness=line_thickness, class_name=self._ontology.contiguous_id_to_name[box.class_id], font_scale=font_scale)

    @property
    def poses(self):
        'Get poses for bounding boxes in agent list.'
        return [box.pose for box in self.boxlist]

    @property
    def sizes(self):
        # (N, 3) array of box extents.
        return np.float32([box.sizes for box in self.boxlist])

    @property
    def class_ids(self):
        """Return class ID for each box, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.int64([box.class_id for box in self.boxlist])

    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]

    @property
    def instance_ids(self):
        return np.int64([box.instance_id for box in self.boxlist])
class AgentSnapshotList(ABC):
    """Base agent snapshot list type. Concrete agent snapshot lists inherit from
    this type and implement the abstract methods.

    Parameters
    ----------
    ontology: Ontology, default: None
        Ontology object for the annotation key.
    """
    def __init__(self, ontology=None):
        if ontology is not None:
            assert isinstance(ontology, Ontology), 'Invalid ontology!'
        self._ontology = ontology

    @property
    def ontology(self):
        """Ontology this list was built with (may be None)."""
        return self._ontology

    @classmethod
    def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
        """Loads agent snapshot list from proto into a canonical format for consumption
        in the __getitem__ function in BaseDataset. Format/data structure will vary by task.

        Parameters
        ----------
        agent_snapshots_pb2: object
            An agent proto message holding agent information.

        ontology: Ontology
            Ontology for given agent.

        feature_ontology_table: dict, optional
            A dictionary mapping feature type key(s) to Ontology(s), i.e.:
            {
                "agent_2d": AgentFeatureOntology[<ontology_sha>],
                "agent_3d": AgentFeatureOntology[<ontology_sha>]
            }
            Default: None.
        """

    @abstractmethod
    def render(self):
        """Return a rendering of the agent snapshot list. Expected format is a PIL.Image or np.array"""
class Annotation(ABC):
    """Base annotation type. Concrete annotations inherit from this type and
    implement its member functions.

    Parameters
    ----------
    ontology: Ontology, default: None
        Ontology object for the annotation key
    """
    def __init__(self, ontology=None):
        if ontology is not None:
            assert isinstance(ontology, Ontology), 'Invalid ontology!'
        self._ontology = ontology

    @property
    def ontology(self):
        """Ontology this annotation was built with (may be None)."""
        return self._ontology

    @classmethod
    @abstractmethod
    def load(cls, annotation_file, ontology):
        """Loads annotation from file into a canonical format for consumption in __getitem__ function in BaseDataset.
        Format/data structure for annotations will vary based on task.

        Parameters
        ----------
        annotation_file: str
            Full path to annotation

        ontology: Ontology
            Ontology for given annotation
        """

    @abstractmethod
    def save(self, save_dir):
        """Serialize annotation object if possible, and saved to specified directory.
        Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Path to directory to saved annotation
        """

    @abstractmethod
    def render(self):
        """Return a rendering of the annotation. Expected format is a PIL.Image or np.array"""

    @property
    @abstractmethod
    def hexdigest(self):
        """Reproducible hash of annotation."""

    def __eq__(self, other):
        # Annotations compare equal when their content hashes agree.
        return self.hexdigest == other.hexdigest

    def __repr__(self):
        return f'{self.__class__.__name__}[{os.path.basename(self.hexdigest)}]'
class BoundingBox2DAnnotationList(Annotation):
    """Container for 2D bounding box annotations.

    Parameters
    ----------
    ontology: BoundingBoxOntology
        Ontology for 2D bounding box tasks.

    boxlist: list[BoundingBox2D]
        List of BoundingBox2D objects. See `dgp/utils/structures/bounding_box_2d` for more details.
    """
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load annotation with wrong type of ontology!'
        for box in boxlist:
            # FIX: message previously read "instantate" (typo in error text).
            assert isinstance(box, BoundingBox2D), f'Can only instantiate an annotation from a list of BoundingBox2D, not {type(box)}'
        self.boxlist = boxlist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: BoundingBoxOntology
            Ontology for 2D bounding box tasks.

        Returns
        -------
        BoundingBox2DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, BoundingBox2DAnnotations)
        boxlist = [
            BoundingBox2D(
                box=np.float32([ann.box.x, ann.box.y, ann.box.w, ann.box.h]),
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                instance_id=ann.instance_id,
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {}),
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, boxlist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        BoundingBox2DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        return BoundingBox2DAnnotations(
            annotations=[
                BoundingBox2DAnnotation(
                    class_id=self._ontology.contiguous_id_to_class_id[box.class_id],
                    box=box.to_proto(),
                    area=int(box.area),
                    instance_id=box.instance_id,
                    attributes=box.attributes,
                ) for box in self.boxlist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and saved to specified directory.
        Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self.boxlist)

    def __getitem__(self, index):
        'Return a single 2D bounding box'
        return self.boxlist[index]

    def render(self):
        'TODO: Batch rendering function for bounding boxes.'
        raise NotImplementedError

    @property
    def ltrb(self):
        'Return boxes as (N, 4) np.ndarray in format ([left, top, right, bottom])'
        return np.array([box.ltrb for box in self.boxlist], dtype=np.float32)

    @property
    def ltwh(self):
        'Return boxes as (N, 4) np.ndarray in format ([left, top, width, height])'
        return np.array([box.ltwh for box in self.boxlist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each box, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.array([box.class_id for box in self.boxlist], dtype=np.int64)

    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]

    @property
    def instance_ids(self):
        return np.array([box.instance_id for box in self.boxlist], dtype=np.int64)

    @property
    def hexdigest(self):
        'Reproducible hash of annotation.'
        return generate_uid_from_pbobject(self.to_proto())
class BoundingBox3DAnnotationList(Annotation):
    """Container for 3D bounding box annotations.

    Parameters
    ----------
    ontology: BoundingBoxOntology
        Ontology for 3D bounding box tasks.

    boxlist: list[BoundingBox3D]
        List of BoundingBox3D objects. See `utils/structures/bounding_box_3d`
        for more details.
    """
    def __init__(self, ontology, boxlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, BoundingBoxOntology), 'Trying to load annotation with wrong type of ontology!'
        # Validate every element up front so failures point at the offending entry.
        for box in boxlist:
            assert isinstance(box, BoundingBox3D), f'Can only instantiate an annotation from a list of BoundingBox3D, not {type(box)}'
        self.boxlist = boxlist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: BoundingBoxOntology
            Ontology for 3D bounding box tasks.

        Returns
        -------
        BoundingBox3DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, BoundingBox3DAnnotations)
        # Box extents are packed (width, length, height); class IDs are remapped
        # onto the ontology's contiguous ID space.
        boxlist = [
            BoundingBox3D(
                pose=Pose.load(ann.box.pose),
                sizes=np.float32([ann.box.width, ann.box.length, ann.box.height]),
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                instance_id=ann.instance_id,
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {}),
                num_points=ann.num_points,
                occlusion=ann.box.occlusion,
                truncation=ann.box.truncation
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, boxlist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        BoundingBox3DAnnotations
            Annotation as defined `proto/annotations.proto`
        """
        # NOTE(review): occlusion/truncation are read in `load` but not written
        # back here — confirm whether that round-trip loss is intentional.
        return BoundingBox3DAnnotations(
            annotations=[
                BoundingBox3DAnnotation(
                    class_id=self._ontology.contiguous_id_to_class_id[box.class_id],
                    box=box.to_proto(),
                    instance_id=box.instance_id,
                    attributes=box.attributes,
                    num_points=box.num_points
                ) for box in self.boxlist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and saved to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            A pathname to a directory to save the annotation object into.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self.boxlist)

    def __getitem__(self, index):
        'Return a single 3D bounding box'
        return self.boxlist[index]

    def render(self, image, camera, line_thickness=2, font_scale=0.5):
        """Render the 3D boxes in this annotation on the image in place

        Parameters
        ----------
        image: np.uint8
            Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format.
            Element type must be uint8.

        camera: dgp.utils.camera.Camera
            Camera used to render the bounding box.

        line_thickness: int, optional
            Thickness of bounding box lines. Default: 2.

        font_scale: float, optional
            Font scale used in text labels. Default: 0.5.

        Raises
        ------
        ValueError
            Raised if image is not a 3-channel uint8 numpy array.
        TypeError
            Raised if camera is not an instance of Camera.
        """
        if ((not isinstance(image, np.ndarray)) or (image.dtype != np.uint8) or (len(image.shape) != 3) or (image.shape[2] != 3)):
            raise ValueError('`image` needs to be a 3-channel uint8 numpy array')
        if (not isinstance(camera, Camera)):
            raise TypeError('`camera` should be of type Camera')
        # Each box renders itself; class name comes from the contiguous-ID ontology.
        for box in self.boxlist:
            box.render(image, camera, line_thickness=line_thickness, class_name=self._ontology.contiguous_id_to_name[box.class_id], font_scale=font_scale)

    @property
    def poses(self):
        'Get poses for bounding boxes in annotation.'
        return [box.pose for box in self.boxlist]

    @property
    def sizes(self):
        # (N, 3) array of box extents.
        return np.float32([box.sizes for box in self.boxlist])

    @property
    def class_ids(self):
        """Return class ID for each box, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.int64([box.class_id for box in self.boxlist])

    @property
    def attributes(self):
        'Return a list of dictionaries of attribute name to value.'
        return [box.attributes for box in self.boxlist]

    @property
    def instance_ids(self):
        return np.int64([box.instance_id for box in self.boxlist])

    @property
    def hexdigest(self):
        'Reproducible hash of annotation.'
        return generate_uid_from_pbobject(self.to_proto())

    def project(self, camera):
        """Project bounding boxes into a camera and get back 2D bounding boxes in the frustum.

        Parameters
        ----------
        camera: Camera
            The Camera instance to project into.

        Raises
        ------
        NotImplementedError
            Unconditionally.
        """
        raise NotImplementedError
class DenseDepthAnnotation(Annotation):
    """Container for a dense per-pixel depth annotation.

    Parameters
    ----------
    depth: np.ndarray
        2D numpy float array that stores per-pixel depth.
    """
    def __init__(self, depth):
        assert isinstance(depth, np.ndarray)
        assert depth.dtype in (np.float32, np.float64)
        # Depth annotations carry no ontology.
        super().__init__(None)
        self._depth = depth

    @property
    def depth(self):
        # The wrapped 2D per-pixel depth array.
        return self._depth

    @classmethod
    def load(cls, annotation_file, ontology=None):
        """Load a depth annotation into the canonical format consumed by
        `BaseDataset.__getitem__`.

        Parameters
        ----------
        annotation_file: str
            Full path to an NPZ file whose 'data' entry stores the 2D depth array.

        ontology: None
            Dummy ontology argument to meet the usage in `BaseDataset.load_annotation()`.
        """
        assert ontology is None, "'ontology' must be 'None' for {}.".format(cls.__name__)
        return cls(np.load(annotation_file)['data'])

    @property
    def hexdigest(self):
        """Reproducible hash of the depth array."""
        return generate_uid_from_point_cloud(self.depth)

    def save(self, save_dir):
        """Serialize the depth array to `<save_dir>/<sha>.npz`.

        Parameters
        ----------
        save_dir: str
            Path to directory in which the annotation is saved.

        Returns
        -------
        pointcloud_path: str
            Full path to the output NPZ file.
        """
        out_path = os.path.join(save_dir, '{}.npz'.format(self.hexdigest))
        np.savez_compressed(out_path, data=self.depth)
        return out_path

    def render(self):
        """TODO: Rendering function for per-pixel depth."""
class KeyLine2DAnnotationList(Annotation):
    """Container for 2D keyline annotations.

    Parameters
    ----------
    ontology: KeyLineOntology
        Ontology for 2D keyline tasks.

    linelist: list[KeyLine2D]
        List of KeyLine2D objects. See `dgp/utils/structures/key_line_2d` for more details.
    """
    def __init__(self, ontology, linelist):
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyLineOntology), 'Trying to load annotation with wrong type of ontology!'
        for line in linelist:
            # Fixed typo in error message: "instantate" -> "instantiate".
            assert isinstance(line, KeyLine2D), f'Can only instantiate an annotation from a list of KeyLine2D, not {type(line)}'
        self.linelist = linelist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: KeyLineOntology
            Ontology for 2D keyline tasks.

        Returns
        -------
        KeyLine2DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, KeyLine2DAnnotations)
        linelist = [
            KeyLine2D(
                line=np.float32([[vertex.x, vertex.y] for vertex in ann.vertices]),
                # Raw ontology class ids are remapped to the contiguous id space.
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {}),
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, linelist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        KeyLine2DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        return KeyLine2DAnnotations(
            annotations=[
                KeyLine2DAnnotation(
                    # Contiguous ids are mapped back to raw ontology class ids.
                    class_id=self._ontology.contiguous_id_to_class_id[line.class_id],
                    vertices=[
                        KeyPoint2D(
                            point=np.float32([x, y]),
                            class_id=line.class_id,
                            instance_id=line.instance_id,
                            color=line.color,
                            attributes=line.attributes
                        ).to_proto() for (x, y) in zip(line.x, line.y)
                    ],
                    attributes=line.attributes
                ) for line in self.linelist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and save to specified directory.
        Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation.
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self.linelist)

    def __getitem__(self, index):
        """Return a single 2D keyline"""
        return self.linelist[index]

    def render(self):
        """TODO: Batch rendering function for keylines."""
        raise NotImplementedError

    @property
    def xy(self):
        """Return lines as (N, 2) np.ndarray in format ([x, y])"""
        return np.array([line.xy.tolist() for line in self.linelist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each line, with ontology applied:
        class IDs mapped to a contiguous set.
        """
        return np.array([line.class_id for line in self.linelist], dtype=np.int64)

    @property
    def attributes(self):
        """Return a list of dictionaries of attribute name to value."""
        return [line.attributes for line in self.linelist]

    @property
    def instance_ids(self):
        return np.array([line.instance_id for line in self.linelist], dtype=np.int64)

    @property
    def hexdigest(self):
        """Reproducible hash of annotation."""
        return generate_uid_from_pbobject(self.to_proto())
class KeyLine3DAnnotationList(Annotation):
    """Container for 3D keyline annotations.

    Parameters
    ----------
    ontology: KeyLineOntology
        Ontology for 3D keyline tasks.

    linelist: list[KeyLine3D]
        List of KeyLine3D objects. See `dgp/utils/structures/key_line_3d` for more details.
    """
    def __init__(self, ontology, linelist):
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyLineOntology), 'Trying to load annotation with wrong type of ontology!'
        for line in linelist:
            # Fixed typo in error message: "instantate" -> "instantiate".
            assert isinstance(line, KeyLine3D), f'Can only instantiate an annotation from a list of KeyLine3D, not {type(line)}'
        self._linelist = linelist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: KeyLineOntology
            Ontology for 3D keyline tasks.

        Returns
        -------
        KeyLine3DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, KeyLine3DAnnotations)
        linelist = [
            KeyLine3D(
                line=np.float32([[vertex.x, vertex.y, vertex.z] for vertex in ann.vertices]),
                # Raw ontology class ids are remapped to the contiguous id space.
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {}),
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, linelist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        KeyLine3DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        return KeyLine3DAnnotations(
            annotations=[
                KeyLine3DAnnotation(
                    # Contiguous ids are mapped back to raw ontology class ids.
                    class_id=self._ontology.contiguous_id_to_class_id[line.class_id],
                    vertices=[
                        KeyPoint3D(
                            point=np.float32([x, y, z]),
                            class_id=line.class_id,
                            instance_id=line.instance_id,
                            color=line.color,
                            attributes=line.attributes
                        ).to_proto() for (x, y, z) in zip(line.x, line.y, line.z)
                    ],
                    attributes=line.attributes
                ) for line in self._linelist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and save to specified directory.
        Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation.
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self._linelist)

    def __getitem__(self, index):
        """Return a single 3D keyline"""
        return self._linelist[index]

    def render(self):
        """Batch rendering function for keylines."""
        raise NotImplementedError

    @property
    def xyz(self):
        """Return lines as (N, 3) np.ndarray in format ([x, y, z])"""
        return np.array([line.xyz.tolist() for line in self._linelist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each line, with ontology applied:
        class IDs mapped to a contiguous set.
        """
        return np.array([line.class_id for line in self._linelist], dtype=np.int64)

    @property
    def attributes(self):
        """Return a list of dictionaries of attribute name to value."""
        return [line.attributes for line in self._linelist]

    @property
    def instance_ids(self):
        return np.array([line.instance_id for line in self._linelist], dtype=np.int64)

    @property
    def hexdigest(self):
        """Reproducible hash of annotation."""
        return generate_uid_from_pbobject(self.to_proto())
class KeyPoint2DAnnotationList(Annotation):
    """Container for 2D keypoint annotations.

    Parameters
    ----------
    ontology: KeyPointOntology
        Ontology for 2D keypoint tasks.

    pointlist: list[KeyPoint2D]
        List of KeyPoint2D objects. See `dgp/utils/structures/key_point_2d` for more details.
    """
    def __init__(self, ontology, pointlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyPointOntology), 'Trying to load annotation with wrong type of ontology!'
        for point in pointlist:
            # Fixed typo in error message: "instantate" -> "instantiate".
            assert isinstance(point, KeyPoint2D), f'Can only instantiate an annotation from a list of KeyPoint2D, not {type(point)}'
        self.pointlist = pointlist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: KeyPointOntology
            Ontology for 2D keypoint tasks.

        Returns
        -------
        KeyPoint2DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, KeyPoint2DAnnotations)
        pointlist = [
            KeyPoint2D(
                point=np.float32([ann.point.x, ann.point.y]),
                # Raw ontology class ids are remapped to the contiguous id space.
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {}),
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, pointlist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        KeyPoint2DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        return KeyPoint2DAnnotations(
            annotations=[
                KeyPoint2DAnnotation(
                    # Contiguous ids are mapped back to raw ontology class ids.
                    class_id=self._ontology.contiguous_id_to_class_id[point.class_id],
                    point=point.to_proto(),
                    attributes=point.attributes
                ) for point in self.pointlist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and save to specified directory.
        Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self.pointlist)

    def __getitem__(self, index):
        """Return a single 2D keypoint"""
        return self.pointlist[index]

    def render(self):
        """TODO: Batch rendering function for keypoints."""
        raise NotImplementedError

    @property
    def xy(self):
        """Return points as (N, 2) np.ndarray in format ([x, y])"""
        return np.array([point.xy for point in self.pointlist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each point, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.array([point.class_id for point in self.pointlist], dtype=np.int64)

    @property
    def attributes(self):
        """Return a list of dictionaries of attribute name to value."""
        return [point.attributes for point in self.pointlist]

    @property
    def instance_ids(self):
        return np.array([point.instance_id for point in self.pointlist], dtype=np.int64)

    @property
    def hexdigest(self):
        """Reproducible hash of annotation."""
        return generate_uid_from_pbobject(self.to_proto())
class KeyPoint3DAnnotationList(Annotation):
    """Container for 3D keypoint annotations.

    Parameters
    ----------
    ontology: KeyPointOntology
        Ontology for 3D keypoint tasks.

    pointlist: list[KeyPoint3D]
        List of KeyPoint3D objects. See `dgp/utils/structures/key_point_3d` for more details.
    """
    def __init__(self, ontology, pointlist):
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyPointOntology), 'Trying to load annotation with wrong type of ontology!'
        for point in pointlist:
            # Fixed typo in error message: "instantate" -> "instantiate".
            assert isinstance(point, KeyPoint3D), f'Can only instantiate an annotation from a list of KeyPoint3D, not {type(point)}'
        self._pointlist = pointlist

    @classmethod
    def load(cls, annotation_file, ontology):
        """Load annotation from annotation file and ontology.

        Parameters
        ----------
        annotation_file: str or bytes
            Full path to annotation or bytestring

        ontology: KeyPointOntology
            Ontology for 3D keypoint tasks.

        Returns
        -------
        KeyPoint3DAnnotationList
            Annotation object instantiated from file.
        """
        _annotation_pb2 = parse_pbobject(annotation_file, KeyPoint3DAnnotations)
        pointlist = [
            KeyPoint3D(
                point=np.float32([ann.point.x, ann.point.y, ann.point.z]),
                # Raw ontology class ids are remapped to the contiguous id space.
                class_id=ontology.class_id_to_contiguous_id[ann.class_id],
                color=ontology.colormap[ann.class_id],
                attributes=getattr(ann, 'attributes', {}),
            ) for ann in _annotation_pb2.annotations
        ]
        return cls(ontology, pointlist)

    def to_proto(self):
        """Return annotation as pb object.

        Returns
        -------
        KeyPoint3DAnnotations
            Annotation as defined in `proto/annotations.proto`
        """
        return KeyPoint3DAnnotations(
            annotations=[
                KeyPoint3DAnnotation(
                    # Contiguous ids are mapped back to raw ontology class ids.
                    class_id=self._ontology.contiguous_id_to_class_id[point.class_id],
                    point=point.to_proto(),
                    attributes=point.attributes
                ) for point in self._pointlist
            ]
        )

    def save(self, save_dir):
        """Serialize Annotation object and save to specified directory.
        Annotations are saved in format <save_dir>/<sha>.<ext>

        Parameters
        ----------
        save_dir: str
            Directory in which annotation is saved.

        Returns
        -------
        output_annotation_file: str
            Full path to saved annotation
        """
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        return len(self._pointlist)

    def __getitem__(self, index):
        """Return a single 3D keypoint"""
        return self._pointlist[index]

    def render(self):
        """Batch rendering function for keypoints."""
        raise NotImplementedError

    @property
    def xyz(self):
        """Return points as (N, 3) np.ndarray in format ([x, y, z])"""
        return np.array([point.xyz for point in self._pointlist], dtype=np.float32)

    @property
    def class_ids(self):
        """Return class ID for each point, with ontology applied:
        0 is background, class IDs mapped to a contiguous set.
        """
        return np.array([point.class_id for point in self._pointlist], dtype=np.int64)

    @property
    def attributes(self):
        """Return a list of dictionaries of attribute name to value."""
        return [point.attributes for point in self._pointlist]

    @property
    def instance_ids(self):
        return np.array([point.instance_id for point in self._pointlist], dtype=np.int64)

    @property
    def hexdigest(self):
        """Reproducible hash of annotation."""
        return generate_uid_from_pbobject(self.to_proto())
class Ontology():
    """Ontology object. At bare minimum, we expect ontologies to provide:
        ID: (int) identifier for class
        Name: (str) string identifier for class
        Color: (tuple) color RGB tuple

    Based on the task, additional fields may be populated. Refer to `dataset.proto` and
    `ontology.proto` specifications for more details. Can be constructed from file or
    from deserialized proto object.

    Parameters
    ----------
    ontology_pb2: [OntologyV1Pb2,OntologyV2Pb2]
        Deserialized ontology object.
    """
    # Reserved id/name for the "unlabeled" class used by segmentation remapping.
    VOID_ID = 255
    VOID_CLASS = 'Void'

    def __init__(self, ontology_pb2):
        self._ontology = ontology_pb2
        if isinstance(self._ontology, OntologyV1Pb2):
            # V1 schema stores parallel maps; sort each for deterministic ordering.
            self._name_to_id = OrderedDict(sorted(self._ontology.name_to_id.items()))
            self._id_to_name = OrderedDict(sorted(self._ontology.id_to_name.items()))
            self._colormap = OrderedDict(
                sorted([(_id, (_color.r, _color.g, _color.b)) for (_id, _color) in self._ontology.colormap.items()])
            )
            self._isthing = OrderedDict(sorted(self._ontology.isthing.items()))
        elif isinstance(self._ontology, OntologyV2Pb2):
            # V2 schema stores a flat list of items; build the same sorted lookups from it.
            self._name_to_id = OrderedDict(
                sorted([(ontology_item.name, ontology_item.id) for ontology_item in self._ontology.items])
            )
            self._id_to_name = OrderedDict(
                sorted([(ontology_item.id, ontology_item.name) for ontology_item in self._ontology.items])
            )
            self._colormap = OrderedDict(
                sorted([
                    (ontology_item.id, (ontology_item.color.r, ontology_item.color.g, ontology_item.color.b))
                    for ontology_item in self._ontology.items
                ])
            )
            self._isthing = OrderedDict(
                sorted([(ontology_item.id, ontology_item.isthing) for ontology_item in self._ontology.items])
            )
        else:
            raise TypeError('Unexpected type {}, expected OntologyV1 or OntologyV2'.format(type(self._ontology)))
        # Class ids (sorted) and their names, in the same order.
        self._class_ids = sorted(self._id_to_name.keys())
        self._class_names = [self._id_to_name[c_id] for c_id in self._class_ids]

    @classmethod
    def load(cls, ontology_file):
        """Construct an ontology from an ontology JSON.

        Parameters
        ----------
        ontology_file: str
            Path to ontology JSON

        Returns
        -------
        Ontology
            Ontology object constructed from the file.

        Raises
        ------
        FileNotFoundError
            Raised if ontology_file does not exist.
        Exception
            Raised if we could not open the ontology file for some reason.
        """
        if os.path.exists(ontology_file):
            ontology_pb2 = open_ontology_pbobject(ontology_file)
        else:
            raise FileNotFoundError('Could not find {}'.format(ontology_file))
        if (ontology_pb2 is not None):
            return cls(ontology_pb2)
        raise Exception('Could not open ontology {}'.format(ontology_file))

    def to_proto(self):
        """Serialize ontology. Only supports exporting in OntologyV2.

        Returns
        -------
        OntologyV2Pb2
            Serialized ontology
        """
        return OntologyV2Pb2(
            items=[
                OntologyItem(
                    name=name,
                    id=class_id,
                    color=OntologyItem.Color(
                        r=self._colormap[class_id][0],
                        g=self._colormap[class_id][1],
                        b=self._colormap[class_id][2]
                    ),
                    isthing=self._isthing[class_id]
                ) for (class_id, name) in self._id_to_name.items()
            ]
        )

    def save(self, save_dir):
        """Write out ontology items to `<sha>.json`. SHA generated from Ontology proto object.

        Parameters
        ----------
        save_dir: str
            Directory in which to save serialized ontology.

        Returns
        -------
        output_ontology_file: str
            Path to serialized ontology file.
        """
        os.makedirs(save_dir, exist_ok=True)
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    @property
    def num_classes(self):
        return len(self._class_ids)

    @property
    def class_names(self):
        return self._class_names

    @property
    def class_ids(self):
        return self._class_ids

    @property
    def name_to_id(self):
        return self._name_to_id

    @property
    def id_to_name(self):
        return self._id_to_name

    @property
    def colormap(self):
        return self._colormap

    @property
    def isthing(self):
        return self._isthing

    @property
    def hexdigest(self):
        """Hash object"""
        return generate_uid_from_pbobject(self.to_proto())

    def __eq__(self, other):
        # Ontologies compare equal iff their serialized protos hash identically.
        return (self.hexdigest == other.hexdigest)

    def __repr__(self):
        return '{}[{}]'.format(self.__class__.__name__, os.path.basename(self.hexdigest))
class BoundingBoxOntology(Ontology):
    """Implements lookup tables specific to 2D bounding box tasks.

    Only "thing" classes participate; their raw class ids are remapped to a
    contiguous id space starting at 1 (0 is implicitly background).

    Parameters
    ----------
    ontology_pb2: [OntologyV1Pb2,OntologyV2Pb2]
        Deserialized ontology object.
    """
    def __init__(self, ontology_pb2):
        super().__init__(ontology_pb2)
        # Keep only classes flagged as "things".
        self._thing_class_ids = [cid for cid, thing in self._isthing.items() if thing]
        # Contiguous ids start at 1; build forward map first, then all derived maps.
        self._class_id_to_contiguous_id = OrderedDict()
        for index, cid in enumerate(self._thing_class_ids):
            self._class_id_to_contiguous_id[cid] = index + 1
        self._contiguous_id_to_class_id = OrderedDict()
        self._contiguous_id_to_name = OrderedDict()
        self._name_to_contiguous_id = OrderedDict()
        self._contiguous_id_colormap = OrderedDict()
        for cid, contiguous_id in self._class_id_to_contiguous_id.items():
            name = self._id_to_name[cid]
            self._contiguous_id_to_class_id[contiguous_id] = cid
            self._contiguous_id_to_name[contiguous_id] = name
            self._name_to_contiguous_id[name] = contiguous_id
            self._contiguous_id_colormap[contiguous_id] = self._colormap[cid]
        # Restrict names to thing classes (overrides the base-class list).
        self._class_names = [self._id_to_name[cid] for cid in self._thing_class_ids]

    @property
    def num_classes(self):
        # Counts thing classes only.
        return len(self._thing_class_ids)

    @property
    def class_names(self):
        return self._class_names

    @property
    def thing_class_ids(self):
        return self._thing_class_ids

    @property
    def class_id_to_contiguous_id(self):
        return self._class_id_to_contiguous_id

    @property
    def contiguous_id_to_class_id(self):
        return self._contiguous_id_to_class_id

    @property
    def contiguous_id_to_name(self):
        return self._contiguous_id_to_name

    @property
    def name_to_contiguous_id(self):
        return self._name_to_contiguous_id

    @property
    def contiguous_id_colormap(self):
        return self._contiguous_id_colormap
class AgentBehaviorOntology(BoundingBoxOntology):
    """Agent behavior ontologies derive directly from bounding box ontologies.

    No behavior is added; this subclass only provides a distinct type for
    agent-behavior tasks.
    """
class KeyPointOntology(BoundingBoxOntology):
    """Keypoint ontologies derive directly from bounding box ontologies.

    No behavior is added; this subclass only provides a distinct type for
    keypoint tasks.
    """
class KeyLineOntology(BoundingBoxOntology):
    """Keyline ontologies derive directly from bounding box ontologies.

    No behavior is added; this subclass only provides a distinct type for
    keyline tasks.
    """
class InstanceSegmentationOntology(BoundingBoxOntology):
    """Instance segmentation ontologies derive directly from bounding box ontologies.

    No behavior is added; this subclass only provides a distinct type for
    instance-segmentation tasks.
    """
class SemanticSegmentationOntology(Ontology):
    """Implements lookup tables for semantic segmentation.

    All classes participate (no thing/stuff filtering); contiguous ids start at 0.

    Parameters
    ----------
    ontology_pb2: [OntologyV1Pb2,OntologyV2Pb2]
        Deserialized ontology object.
    """
    def __init__(self, ontology_pb2):
        super().__init__(ontology_pb2)
        # Forward map: raw class id -> contiguous id (0-based, insertion order).
        self._class_id_to_contiguous_id = OrderedDict()
        for index, cid in enumerate(self._class_ids):
            self._class_id_to_contiguous_id[cid] = index
        # Derived lookups, all keyed consistently with the forward map.
        self._contiguous_id_to_class_id = OrderedDict()
        self._contiguous_id_to_name = OrderedDict()
        self._name_to_contiguous_id = OrderedDict()
        self._contiguous_id_colormap = OrderedDict()
        for cid, contiguous_id in self._class_id_to_contiguous_id.items():
            name = self._id_to_name[cid]
            self._contiguous_id_to_class_id[contiguous_id] = cid
            self._contiguous_id_to_name[contiguous_id] = name
            self._name_to_contiguous_id[name] = contiguous_id
            self._contiguous_id_colormap[contiguous_id] = self._colormap[cid]
        # Dense array lookup raw-id -> contiguous-id; ids without a mapping go to VOID.
        self._label_lookup = np.full(max(self.class_ids) + 1, self.VOID_ID, dtype=np.uint8)
        for cid, contiguous_id in self._class_id_to_contiguous_id.items():
            self._label_lookup[cid] = contiguous_id

    @property
    def label_lookup(self):
        return self._label_lookup

    @property
    def class_id_to_contiguous_id(self):
        return self._class_id_to_contiguous_id

    @property
    def contiguous_id_to_class_id(self):
        return self._contiguous_id_to_class_id

    @property
    def contiguous_id_to_name(self):
        return self._contiguous_id_to_name

    @property
    def name_to_contiguous_id(self):
        return self._name_to_contiguous_id

    @property
    def contiguous_id_colormap(self):
        return self._contiguous_id_colormap
def remap_bounding_box_annotations(bounding_box_annotations, lookup_table, original_ontology, remapped_ontology):
    """Remap bounding-box annotations onto a new ontology.

    Boxes whose class is not a key of `lookup_table` are dropped; the
    remaining boxes have their `class_id` rewritten in place.

    Parameters
    ----------
    bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Annotations to remap

    lookup_table: dict
        Lookup from old class names to new class names
        e.g. {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}

    original_ontology: BoundingBoxOntology
        Ontology we are remapping annotations from

    remapped_ontology: BoundingBoxOntology
        Ontology we are mapping annotations to

    Returns
    -------
    remapped_bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Remapped annotations with the same type as bounding_box_annotations
    """
    assert (isinstance(original_ontology, BoundingBoxOntology) and isinstance(remapped_ontology, BoundingBoxOntology))
    kept_boxes = []
    for box in bounding_box_annotations:
        source_name = original_ontology.contiguous_id_to_name[box.class_id]
        if source_name not in lookup_table:
            # Classes absent from the lookup are dropped.
            continue
        box.class_id = remapped_ontology.name_to_contiguous_id[lookup_table[source_name]]
        kept_boxes.append(box)
    # Rebuild with the same concrete annotation-list type as the input.
    return type(bounding_box_annotations)(remapped_ontology, kept_boxes)
def remap_semantic_segmentation_2d_annotation(semantic_segmentation_annotation, lookup_table, original_ontology, remapped_ontology):
    """Remap a 2D semantic segmentation annotation onto a new ontology.

    Pixels whose class is not a key of `lookup_table` become `Ontology.VOID_ID`.

    Parameters
    ----------
    semantic_segmentation_annotation: SemanticSegmentation2DAnnotation
        Annotation to remap

    lookup_table: dict
        Lookup from old class names to new class names
        e.g. {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}

    original_ontology: SemanticSegmentationOntology
        Ontology we are remapping annotation from

    remapped_ontology: SemanticSegmentationOntology
        Ontology we are mapping annotation to

    Returns
    -------
    remapped_semantic_segmentation_2d_annotation: SemanticSegmentation2DAnnotation
        Remapped annotation
    """
    assert (isinstance(original_ontology, SemanticSegmentationOntology) and isinstance(remapped_ontology, SemanticSegmentationOntology))
    source_label = semantic_segmentation_annotation.label
    # Start from VOID everywhere; only classes present in the lookup are carried over.
    remapped_label = np.full_like(source_label, Ontology.VOID_ID)
    for old_name, new_name in lookup_table.items():
        class_mask = (source_label == original_ontology.name_to_contiguous_id[old_name])
        remapped_label[class_mask] = remapped_ontology.name_to_contiguous_id[new_name]
    return SemanticSegmentation2DAnnotation(remapped_ontology, remapped_label)
def remap_instance_segmentation_2d_annotation(instance_segmentation_annotation, lookup_table, original_ontology, remapped_ontology):
    """Remap a 2D instance segmentation annotation onto a new ontology.

    Instance masks whose class is not a key of `lookup_table` are dropped; the
    remaining masks have their `class_id` rewritten in place.

    Parameters
    ----------
    instance_segmentation_annotation: PanopticSegmentation2DAnnotation
        Annotation to remap

    lookup_table: dict
        Lookup from old class names to new class names
        e.g. {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}

    original_ontology: InstanceSegmentationOntology
        Ontology we are remapping annotation from

    remapped_ontology: InstanceSegmentationOntology
        Ontology we are mapping annotation to

    Returns
    -------
    PanopticSegmentation2DAnnotation:
        Remapped annotation
    """
    assert (isinstance(original_ontology, InstanceSegmentationOntology) and isinstance(remapped_ontology, InstanceSegmentationOntology))
    kept_masks = []
    for mask in instance_segmentation_annotation:
        source_name = original_ontology.contiguous_id_to_name[mask.class_id]
        if source_name not in lookup_table:
            # Classes absent from the lookup are dropped.
            continue
        mask.class_id = remapped_ontology.name_to_contiguous_id[lookup_table[source_name]]
        kept_masks.append(mask)
    assert isinstance(instance_segmentation_annotation, PanopticSegmentation2DAnnotation)
    return PanopticSegmentation2DAnnotation.from_masklist(
        kept_masks,
        remapped_ontology,
        instance_segmentation_annotation.panoptic_image.shape,
        instance_segmentation_annotation.panoptic_image_dtype
    )
def construct_remapped_ontology(ontology, lookup, annotation_key):
    """Given an Ontology object and a lookup from old class names to new class names,
    construct an ontology proto for the new ontology that results.

    Parameters
    ----------
    ontology: dgp.annotations.Ontology
        Ontology we are trying to remap using `lookup`
        eg. ontology.id_to_name = {0: 'Car', 1: 'Truck', 2: 'Motrocycle'}

    lookup: dict
        Lookup from old class names to new class names
        e.g.:
            {
                'Car': 'Car',
                'Truck': 'Car',
                'Motorcycle': 'Motorcycle'
            }

        NOTE: `lookup` needs to be exhaustive; any classes that the user wants to have in
        returned ontology need to be remapped explicitly.

    annotation_key: str
        Annotation key of Ontology
        e.g. `bounding_box_2d`

    Returns
    -------
    remapped_ontology_pb2: dgp.proto.ontology_pb2.Ontology
        Ontology defined by applying `lookup` on original `ontology`.

        NOTE: remapped class ids follow the insertion order of `lookup`; within one
        remapped class the ORIGINAL names are sorted alphabetically, so if both 'Car'
        and 'Motorcycle' get remapped to 'DynamicObject', the color for 'DynamicObject'
        will be the original color for 'Car'.

        Any class names not in `lookup` are dropped.

    Notes
    -----
    This could be a class function of `Ontology`
    """
    assert isinstance(ontology, Ontology), f'Expected Ontology, got {type(ontology)}'
    # Group original class names under their remapped name (insertion order of `lookup`).
    remapped_class_name_to_original_class_names = OrderedDict()
    for (class_name, remapped_class_name) in lookup.items():
        if (remapped_class_name not in remapped_class_name_to_original_class_names):
            remapped_class_name_to_original_class_names[remapped_class_name] = []
        remapped_class_name_to_original_class_names[remapped_class_name].append(class_name)
    # Sort originals alphabetically within each group; index 0 later supplies the color.
    remapped_class_name_to_original_class_names = {
        k: sorted(v) for (k, v) in remapped_class_name_to_original_class_names.items()
    }
    remapped_ontology_pb2 = OntologyPB2()
    for (remapped_class_id, (remapped_class_name, original_class_names)) in enumerate(remapped_class_name_to_original_class_names.items()):
        original_class_ids = [ontology.name_to_id[class_name] for class_name in original_class_names]
        isthing = [ontology.isthing[class_id] for class_id in original_class_ids]
        if (annotation_key == 'semantic_segmentation_2d'):
            # Semantic segmentation treats every class as "stuff".
            isthing = False
        else:
            assert (len(set(isthing)) == 1), 'Classes mapping to the same class are either all things or all stuff'
            isthing = isthing[0]
        # Color comes from the alphabetically-first original class (see NOTE above).
        remapped_class_color = ontology.colormap[original_class_ids[0]]
        remapped_ontology_pb2.items.extend([
            OntologyItem(
                name=remapped_class_name,
                id=remapped_class_id,
                isthing=isthing,
                color=OntologyItem.Color(
                    r=remapped_class_color[0], g=remapped_class_color[1], b=remapped_class_color[2]
                )
            )
        ])
    # For semantic segmentation, guarantee a Void class exists for unmapped pixels.
    if ((annotation_key == 'semantic_segmentation_2d') and (not (Ontology.VOID_CLASS in remapped_class_name_to_original_class_names))):
        remapped_ontology_pb2.items.extend([
            OntologyItem(
                name=Ontology.VOID_CLASS, id=Ontology.VOID_ID, isthing=False,
                color=OntologyItem.Color(r=0, g=0, b=0)
            )
        ])
    return remapped_ontology_pb2
class Compose():
    """Composes several transforms together.

    Parameters
    ----------
    transforms
        List of transforms to compose; each must inherit from `BaseTransform`
        and is applied in order via `__call__`.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """
    def __init__(self, transforms):
        for t in transforms:
            if not isinstance(t, BaseTransform):
                raise TypeError('All transforms used in Compose should inherit from `BaseTransform`')
        self.transforms = transforms

    def __call__(self, data):
        # Apply each transform in sequence, feeding each the previous output.
        result = data
        for t in self.transforms:
            result = t(result)
        return result

    def __repr__(self):
        lines = [self.__class__.__name__ + '(']
        for t in self.transforms:
            lines.append(' {0}'.format(t))
        return '\n'.join(lines) + '\n)'
class BaseTransform():
    """
    Base transform class that other transforms should inherit from. Simply ensures that
    input type to `__call__` is an OrderedDict (in general usage this dict will include
    keys such as 'rgb', 'bounding_box_2d', etc. i.e. raw data and annotations)

    cf. `OntologyMapper` for an example
    """
    def __call__(self, data):
        # Accept either a single datum (OrderedDict) or a list of samples,
        # each being a list of OrderedDict datums.
        is_datum = isinstance(data, OrderedDict)
        is_nested = (
            isinstance(data, list) and isinstance(data[0], list) and isinstance(data[0][0], OrderedDict)
        )
        if not (is_datum or is_nested):
            raise TypeError('`BaseTransform` expects input of type `OrderedDict` or list of list of `OrderedDict`.')
        return self.transform(data)

    def transform(self, data):
        """Dispatch on input shape.

        Parameters
        ----------
        data: OrderedDict or list[list[OrderedDict]]
            dataset item as returned by `_SynchronizedDataset` or `_FrameDataset`.

        Returns
        -------
        OrderedDict or list[list[OrderedDict]]:
            Same type as input with transformations applied to dataset item.
        """
        if isinstance(data, OrderedDict):
            return self.transform_datum(data)
        if isinstance(data, list):
            return [self.transform_sample(sample) for sample in data]

    def transform_datum(self, datum):
        # Subclasses override to transform one datum.
        raise NotImplementedError

    def transform_sample(self, sample):
        # Subclasses override to transform one sample (list of datums).
        raise NotImplementedError
class OntologyMapper(BaseTransform):
    """Remap annotation ontologies according to a per-annotation lookup table.

    The remapped ontology is taken from ``remapped_ontology_table`` when provided;
    otherwise it is constructed automatically from the order of ``lookup_table``.

    Parameters
    ----------
    original_ontology_table: dict[str->dgp.annotations.Ontology]
        Ontology object *per annotation type*, e.g.
        {
            "bounding_box_2d": BoundingBoxOntology[<ontology_sha>],
            "semantic_segmentation_2d": SemanticSegmentationOntology[<ontology_sha>],
            "bounding_box_3d": BoundingBoxOntology[<ontology_sha>],
        }

    lookup_table: dict[str->dict]
        Lookup table *per annotation type* mapping old class name to new class name, e.g.
        {
            'bounding_box_2d': {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}
        }

    remapped_ontology_table: dict[str->dgp.annotations.Ontology], default: None
        If specified, annotations are remapped onto these target ontologies instead
        of automatically constructed ones.
    """

    # Only these annotation types have remap helpers wired up in transform_datum.
    SUPPORTED_ANNOTATION_TYPES = ('bounding_box_2d', 'semantic_segmentation_2d', 'bounding_box_3d', 'instance_segmentation_2d')

    def __init__(self, original_ontology_table, lookup_table, remapped_ontology_table=None):
        # Validate all requested annotation types up-front, before building anything.
        for annotation_key in lookup_table:
            if (annotation_key not in self.SUPPORTED_ANNOTATION_TYPES):
                raise ValueError(f'annotation_key {annotation_key} not supported for remapping yet, we accept PRs')
            if (annotation_key not in original_ontology_table):
                raise ValueError(f'annotation_key {annotation_key} needs to be present in `ontology_table`')
        self.lookup_table = lookup_table
        self.original_ontology_table = original_ontology_table
        self.remapped_ontology_table = {}
        for (annotation_key, lookup) in self.lookup_table.items():
            # Every source class name must exist in the original ontology.
            assert all([(class_name in original_ontology_table[annotation_key].class_names) for class_name in lookup.keys()]), 'All keys in `lookup` need to be valid class names in specified `ontology`'
            if ((remapped_ontology_table is not None) and (annotation_key in remapped_ontology_table)):
                # Caller supplied the target ontology explicitly.
                self.remapped_ontology_table[annotation_key] = remapped_ontology_table[annotation_key]
            else:
                # Build the target ontology protobuf from the lookup order, then wrap
                # it in the annotation-specific Ontology class.
                remapped_ontology_pb2 = construct_remapped_ontology(original_ontology_table[annotation_key], lookup, annotation_key)
                self.remapped_ontology_table[annotation_key] = ONTOLOGY_REGISTRY[annotation_key](remapped_ontology_pb2)
            # Every target class name must exist in the (possibly supplied) remapped ontology.
            assert all([(class_name in self.remapped_ontology_table[annotation_key].class_names) for class_name in lookup.values()]), 'All values in `lookup` need to be valid class names in specified `remapped_ontology`'

    def transform_datum(self, datum):
        """Remap all configured annotations of a single datum.

        Parameters
        ----------
        datum: OrderedDict
            Dictionary containing raw data and annotations, with keys such as:
            'rgb', 'intrinsics', 'bounding_box_2d'.
            All annotation_keys in `self.lookup_table` (and `self.remapped_ontology_table`)
            are expected to be contained.

        Returns
        -------
        datum: OrderedDict
            Same dictionary but with annotations in `self.lookup_table` remapped
            to the desired ontologies.

        Raises
        ------
        ValueError
            Raised if the datum to remap does not contain all expected annotations.
        """
        if (not all([(annotation_key in datum) for annotation_key in self.remapped_ontology_table])):
            raise ValueError('The data you are trying to remap does not have all annotations it expects')
        for (annotation_key, remapped_ontology) in self.remapped_ontology_table.items():
            lookup_table = self.lookup_table[annotation_key]
            original_ontology = datum[annotation_key].ontology
            # Dispatch to the annotation-type-specific remap helper.
            if ((annotation_key == 'bounding_box_2d') or (annotation_key == 'bounding_box_3d')):
                datum[annotation_key] = remap_bounding_box_annotations(datum[annotation_key], lookup_table, original_ontology, remapped_ontology)
            elif (annotation_key == 'semantic_segmentation_2d'):
                datum[annotation_key] = remap_semantic_segmentation_2d_annotation(datum[annotation_key], lookup_table, original_ontology, remapped_ontology)
            elif (annotation_key == 'instance_segmentation_2d'):
                datum[annotation_key] = remap_instance_segmentation_2d_annotation(datum[annotation_key], lookup_table, original_ontology, remapped_ontology)
        return datum
class AddLidarCuboidPoints(BaseTransform):
    """Populate the num_points field for bounding_box_3d."""

    def __init__(self, subsample: int = 1) -> None:
        """Populate the num_points field for bounding_box_3d. Optionally downsamples the point cloud for speed.

        Parameters
        ----------
        subsample: int, default: 1
            Fraction of point cloud to use for computing the number of points. i.e., subsample=10 indicates that
            1/10th of the points should be used.
        """
        super().__init__()
        self.subsample = subsample

    def transform_datum(self, datum: Dict[str, Any]) -> Dict[str, Any]:
        """Populate the num_points field for bounding_box_3d.

        Parameters
        ----------
        datum: Dict[str,Any]
            A dgp lidar or point cloud datum. Must contain keys bounding_box_3d and point_cloud.

        Returns
        -------
        datum: Dict[str,Any]
            The datum with num_points added to the cuboids.
        """
        # Nothing to do without cuboid annotations.
        if 'bounding_box_3d' not in datum:
            return datum
        cuboids = datum['bounding_box_3d']
        if cuboids is None or len(cuboids) == 0:
            return datum
        assert 'point_cloud' in datum, 'datum should contain point_cloud key'
        cloud = datum['point_cloud']
        if self.subsample > 1:
            # Randomly keep ~1/subsample of the points; the per-box counts are
            # scaled back up by `subsample` below, so the result is an estimate.
            total = cloud.shape[0]
            keep = np.random.choice(total, total // self.subsample)
            cloud = cloud[keep].copy()
        for cuboid in cuboids:
            # Only fill in counts that are missing (zero); existing counts are trusted.
            if cuboid.num_points == 0:
                inside = points_in_cuboid(cloud, cuboid)
                cuboid._num_points = np.sum(inside) * self.subsample
        return datum
class InstanceMaskVisibilityFilter(BaseTransform):
    """Given multi-modal camera data, select instances whose instance masks appear big enough *at least in one camera*.

    For example, even when an object is mostly truncated in one camera, if it looks big enough in a neighboring
    camera in the multi-modal sample, it will be included in the annotations. In the transformed dataset item,
    all detection annotations (i.e. `bounding_box_3d`, `bounding_box_2d`, `instance_segmentation_2d`) contain a
    single set of instances.

    Parameters
    ----------
    camera_datum_names: list[str]
        Names of camera datums to be used in visibility computation.
        The definition of "visible" is that an instance has a large mask at least in one of these cameras.

    min_mask_size: int, default: 300
        Minimum number of foreground pixels in instance mask for an instance to be added to annotations.

    use_amodal_bbox2d_annotations: bool, default: False
        If True, then use "amodal" bounding box (i.e. the box includes occluded/truncated parts) for 2D bounding box annotation.
        If False, then use "modal" bounding box (i.e. tight bounding box of instance mask.)
    """

    def __init__(self, camera_datum_names, min_mask_size=300, use_amodal_bbox2d_annotations=False):
        self._camera_datum_names = camera_datum_names
        self._min_mask_size = min_mask_size
        self._use_amodal_bbox2d_annotations = use_amodal_bbox2d_annotations

    def transform_sample(self, sample):
        """Main entry point for filtering a multimodal sample using instance masks.

        Parameters
        ----------
        sample: list[OrderedDict]
            Multimodal sample as returned by `__getitem__()` of `_SynchronizedDataset`.

        Returns
        -------
        new_sample: list[OrderedDict]
            Multimodal sample with all detection annotations filtered.

        Raises
        ------
        ValueError
            Raised if a 2D or 3D bounding box instance lacks any required instance IDs.
        """
        # Only the configured camera datums participate in visibility computation.
        cam_datums = [datum for datum in sample if (datum['datum_name'] in self._camera_datum_names)]
        # Instance ids whose mask clears the size threshold in at least one camera.
        visible_instance_ids = set()
        # Per camera: instance ids present in that camera's masks (and boxes, after filtering).
        in_frustum_instance_ids_per_camera = {}
        (id_to_bbox3d_per_camera, id_to_mask2d_per_camera, id_to_bbox2d_per_camera) = ({}, {}, {})
        for datum in cam_datums:
            datum_name = datum['datum_name']
            in_frustum_instance_ids_per_camera[datum_name] = [mask.instance_id for mask in datum['instance_segmentation_2d']]
            # Index per-instance annotations by instance id for fast lookup later.
            id_to_bbox3d = {bbox3d.instance_id: bbox3d for bbox3d in datum['bounding_box_3d']}
            id_to_mask2d = {mask2d.instance_id: mask2d for mask2d in datum['instance_segmentation_2d']}
            id_to_bbox3d_per_camera[datum_name] = id_to_bbox3d
            id_to_mask2d_per_camera[datum_name] = id_to_mask2d
            if self._use_amodal_bbox2d_annotations:
                id_to_bbox2d = {bbox2d.instance_id: bbox2d for bbox2d in datum['bounding_box_2d']}
                id_to_bbox2d_per_camera[datum_name] = id_to_bbox2d
            # Keep only ids that also carry the box annotations required downstream.
            if self._use_amodal_bbox2d_annotations:
                in_frustum_instance_ids_per_camera[datum_name] = [_id for _id in in_frustum_instance_ids_per_camera[datum_name] if ((_id in id_to_bbox2d) and (_id in id_to_bbox3d))]
            else:
                in_frustum_instance_ids_per_camera[datum_name] = [_id for _id in in_frustum_instance_ids_per_camera[datum_name] if (_id in id_to_bbox3d)]
            # NOTE(review): after the filtering just above, these "missing" sets
            # are always empty, so both ValueError branches below appear
            # unreachable — confirm whether the filter or the check reflects the
            # intended contract.
            ids_missing_in_bbox3d = list((set(in_frustum_instance_ids_per_camera[datum_name]) - set(id_to_bbox3d)))
            if ids_missing_in_bbox3d:
                raise ValueError('Missing instances from `bounding_box_3d`: {:s}'.format(', '.join(sorted(ids_missing_in_bbox3d))))
            if self._use_amodal_bbox2d_annotations:
                ids_missing_in_bbox2d = list((set(in_frustum_instance_ids_per_camera[datum_name]) - set(id_to_bbox2d)))
                if ids_missing_in_bbox2d:
                    raise ValueError('Missing instances from `bounding_box_2d`: {:s}'.format(', '.join(sorted(ids_missing_in_bbox2d))))
            # Record ids that are "visible" (mask large enough) in this camera.
            for instance_mask in datum['instance_segmentation_2d']:
                if (instance_mask.area >= self._min_mask_size):
                    visible_instance_ids.add(instance_mask.instance_id)
        # Rebuild annotations in place, keeping only instances visible somewhere.
        new_sample = sample
        for datum in new_sample:
            datum_name = datum['datum_name']
            if (datum_name not in self._camera_datum_names):
                continue
            (new_boxlist_3d, new_boxlist_2d, new_masklist_2d) = ([], [], [])
            for instance_id in in_frustum_instance_ids_per_camera[datum_name]:
                if (instance_id in visible_instance_ids):
                    new_boxlist_3d.append(id_to_bbox3d_per_camera[datum_name][instance_id])
                    mask2d = id_to_mask2d_per_camera[datum_name][instance_id]
                    if self._use_amodal_bbox2d_annotations:
                        bbox2d = id_to_bbox2d_per_camera[datum_name][instance_id]
                    else:
                        # Modal box: tight axis-aligned bounds of the mask's foreground pixels.
                        (yy, xx) = mask2d.bitmask.nonzero()
                        (y1, y2) = (np.min(yy), np.max(yy))
                        (x1, x2) = (np.min(xx), np.max(xx))
                        bbox2d = BoundingBox2D(box=np.float32([x1, y1, x2, y2]), class_id=mask2d.class_id, instance_id=mask2d.instance_id, attributes=mask2d.attributes, mode='ltrb')
                    new_boxlist_2d.append(bbox2d)
                    new_masklist_2d.append(mask2d)
            datum['bounding_box_3d'] = BoundingBox3DAnnotationList(datum['bounding_box_3d'].ontology, new_boxlist_3d)
            datum['bounding_box_2d'] = BoundingBox2DAnnotationList(datum['bounding_box_2d'].ontology, new_boxlist_2d)
            datum['instance_segmentation_2d'] = PanopticSegmentation2DAnnotation.from_masklist(new_masklist_2d, datum['instance_segmentation_2d'].ontology, mask_shape=(datum['rgb'].height, datum['rgb'].width))
        return new_sample

    def transform_datum(self, datum):
        """Main entry point for filtering a single-modal datum using instance masks.

        Parameters
        ----------
        datum: OrderedDict
            Single-modal datum as returned by `__getitem__()` of `_FrameDataset`.

        Returns
        -------
        new_datum: OrderedDict
            Single-modal sample with all detection annotations filtered.
        """
        # Reuse the multi-modal path on a singleton sample.
        return self.transform_sample([datum])[0]
class BoundingBox3DCoalescer(BaseTransform):
    """Coalesce 3D bounding box annotation from multiple datums and use it as an annotation of target datum.
    The bounding boxes are brought into the target datum frame.

    Parameters
    ----------
    src_datum_names: list[str]
        List of datum names used to create a list of coalesced bounding boxes.

    dst_datum_name: str
        Datum whose `bounding_box_3d` is replaced by the coalesced bounding boxes.

    drop_src_datums: bool, default: True
        If True, then remove the source datums in the transformed sample.
    """

    def __init__(self, src_datum_names, dst_datum_name, drop_src_datums=True):
        self._src_datum_names = src_datum_names
        self._dst_datum_name = dst_datum_name
        self._drop_src_datums = drop_src_datums

    def transform_sample(self, sample):
        """Main entry point for coalescing 3D bounding boxes.

        Parameters
        ----------
        sample: list[OrderedDict]
            Multimodal sample as returned by `__getitem__()` of `_SynchronizedDataset`.

        Returns
        -------
        new_sample: list[OrderedDict]
            Multimodal sample with updated 3D bounding box annotations.

        Raises
        ------
        ValueError
            Raised if there are multiple instances of the same kind of datum in a sample.
        """
        # Index source and destination datums by their position in the sample.
        (datums, src_datum_inds, dst_datum_ind) = (OrderedDict(), [], [])
        for (idx, datum) in enumerate(sample):
            if (datum['datum_name'] in self._src_datum_names):
                src_datum_inds.append(idx)
            elif (datum['datum_name'] == self._dst_datum_name):
                dst_datum_ind.append(idx)
            datums[idx] = datum
        # Exactly one destination datum must be present.
        if (len(dst_datum_ind) != 1):
            raise ValueError('There must be one {:s} datum.'.format(self._dst_datum_name))
        dst_datum_ind = dst_datum_ind[0]
        (bbox_3d_V_merged, instance_ids_merged) = ([], [])
        dst_datum = datums[dst_datum_ind]
        for idx in src_datum_inds:
            src_datum = datums[idx]
            # Relative pose taking points from the source frame into the destination frame.
            p_src_dst = (dst_datum['pose'].inverse() * src_datum['pose'])
            for bbox_3d in src_datum['bounding_box_3d']:
                # Deduplicate by instance id across sources; first occurrence wins.
                # NOTE(review): membership test on a list is O(n) per box —
                # a set alongside the list would be cheaper for large samples.
                if (bbox_3d.instance_id not in instance_ids_merged):
                    instance_ids_merged.append(bbox_3d.instance_id)
                    bbox_3d_V_merged.append((p_src_dst * bbox_3d))
        # Replace the destination datum's annotation with the coalesced list,
        # keeping the destination's ontology.
        ontology = dst_datum['bounding_box_3d'].ontology
        coalesced_bbox3d_annotation = BoundingBox3DAnnotationList(ontology, bbox_3d_V_merged)
        dst_datum['bounding_box_3d'] = coalesced_bbox3d_annotation
        # Rebuild the sample, optionally dropping the source datums.
        transformed_sample = []
        for (idx, datum) in enumerate(sample):
            if (idx in src_datum_inds):
                if (not self._drop_src_datums):
                    transformed_sample.append(datum)
            elif (idx == dst_datum_ind):
                transformed_sample.append(dst_datum)
            else:
                transformed_sample.append(datum)
        return transformed_sample
@click.group()
@click.version_option()
def cli():
    """Top-level click command group for the visualization CLI."""
    # Default the root logger to INFO so subcommand progress messages are shown.
    logging.getLogger().setLevel(level=logging.INFO)
@cli.command(name='visualize-scene')
@add_options(options=VISUALIZE_OPTIONS)
@click.option('--scene-json', required=True, help='Path to Scene JSON')
def visualize_scene(scene_json, annotations, camera_datum_names, dataset_class, show_instance_id, max_num_items, video_fps, dst_dir, verbose, lidar_datum_names, render_pointcloud, radar_datum_names, render_radar_pointcloud, render_raw):
    """Parallelized visualizing of a scene.

    Renders up to three outputs per scene, depending on the requested
    annotations and flags: 2D annotations, 3D annotations, and raw sensory data.
    When --dst-dir is given, each is written as an .avi under a '2d'/'3d'/'raw'
    subdirectory; otherwise visualizations are not saved to file.

    Example
    -------
    $ cli.py visualize-scene --scene-json tests/data/dgp/test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json --dst-dir /mnt/fsx -a bounding_box_3d -c camera_01
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)
    base_path = os.path.dirname(scene_json)
    if (dst_dir is not None):
        # Output video is named after the scene directory.
        video_path = (os.path.basename(base_path) + '.avi')
        logging.info('Visualizing scene {} into {}'.format(os.path.basename(base_path), dst_dir))
    else:
        video_file = None
    # NOTE(review): only the 2D pass honors --dataset-class; the 3D and raw
    # passes below always construct SynchronizedScene — confirm intentional.
    scene_dataset_class = (ParallelDomainScene if (dataset_class == 'ParallelDomainScene') else SynchronizedScene)
    # ---- 2D annotations pass ----
    annotations_2d = tuple([a for a in annotations if (ANNOTATION_TYPE_TO_ANNOTATION_GROUP[a] == '2d')])
    if annotations_2d:
        dataset = scene_dataset_class(scene_json, datum_names=camera_datum_names, requested_annotations=annotations_2d, only_annotated_datums=True)
        if len(dataset):
            if (dst_dir is not None):
                os.makedirs(os.path.join(dst_dir, '2d'), exist_ok=True)
                video_file = os.path.join(dst_dir, '2d', video_path)
            visualize_dataset_2d(dataset, camera_datum_names=camera_datum_names, caption_fn=partial(make_caption, prefix=base_path), output_video_file=video_file, output_video_fps=video_fps, max_num_items=max_num_items, show_instance_id=show_instance_id)
            logging.info('Visualizing 2D annotation visualizations to {}'.format(video_file))
        else:
            logging.info('Scene {} does not contain any of the requested datums {} annotated with {}. Skip 2d visualization.'.format(scene_json, camera_datum_names, annotations_2d))
    # ---- 3D annotations pass (also runs for pointcloud-only rendering) ----
    annotations_3d = tuple([a for a in annotations if (ANNOTATION_TYPE_TO_ANNOTATION_GROUP[a] == '3d')])
    if (annotations_3d or render_pointcloud or render_radar_pointcloud):
        datum_names = ((list(camera_datum_names) + list(lidar_datum_names)) + list(radar_datum_names))
        dataset = SynchronizedScene(scene_json, datum_names=datum_names, requested_annotations=annotations_3d, only_annotated_datums=True)
        if len(dataset):
            if (dst_dir is not None):
                os.makedirs(os.path.join(dst_dir, '3d'), exist_ok=True)
                video_file = os.path.join(dst_dir, '3d', video_path)
            visualize_dataset_3d(dataset, camera_datum_names=camera_datum_names, lidar_datum_names=lidar_datum_names, caption_fn=partial(make_caption, prefix=base_path), output_video_file=video_file, output_video_fps=video_fps, render_pointcloud_on_images=render_pointcloud, max_num_items=max_num_items, show_instance_id_on_bev=show_instance_id, radar_datum_names=radar_datum_names, render_radar_pointcloud_on_images=render_radar_pointcloud)
            logging.info('Visualizing 3D annotation visualizations to {}'.format(video_file))
        else:
            logging.info('Scene {} does not contain any of the requested samples {} annotated with {}. Skip 3d visualization.'.format(scene_json, datum_names, annotations_3d))
    # ---- Raw sensory data pass (no annotation filtering) ----
    if render_raw:
        datum_names = ((list(camera_datum_names) + list(lidar_datum_names)) + list(radar_datum_names))
        dataset = SynchronizedScene(scene_json, datum_names=datum_names, only_annotated_datums=False)
        if len(dataset):
            if (dst_dir is not None):
                os.makedirs(os.path.join(dst_dir, 'raw'), exist_ok=True)
                video_file = os.path.join(dst_dir, 'raw', video_path)
            visualize_dataset_3d(dataset, camera_datum_names=camera_datum_names, lidar_datum_names=lidar_datum_names, caption_fn=partial(make_caption, prefix=base_path), output_video_file=video_file, output_video_fps=video_fps, render_pointcloud_on_images=render_pointcloud, max_num_items=max_num_items, show_instance_id_on_bev=False, radar_datum_names=radar_datum_names, render_radar_pointcloud_on_images=render_radar_pointcloud)
            logging.info('Visualizing raw sensory data visualizations to {}'.format(video_file))
        else:
            logging.info('Scene {} does not contain any of the requested samples {}. Skip visualization.'.format(scene_json, datum_names))
@cli.command(name='visualize-scenes')
@click.option('--scene-dataset-json', required=True, help='Path to SceneDataset JSON')
@click.option('--split', type=click.Choice(['train', 'val', 'test', 'train_overfit']), required=True, help='Dataset split to be fetched.')
@add_options(options=VISUALIZE_OPTIONS)
def visualize_scenes(scene_dataset_json, split, annotations, camera_datum_names, dataset_class, show_instance_id, max_num_items, video_fps, dst_dir, verbose, lidar_datum_names, render_pointcloud, radar_datum_names, render_radar_pointcloud, render_raw):
    """Parallelized visualizing of scene dataset.

    Iterates over every scene in the requested split and delegates the actual
    rendering to the `visualize-scene` command.

    Example
    -------
    $ cli.py visualize-scenes --scene-dataset-json tests/data/dgp/test_scene/scene_dataset_v1.0.json --dst-dir /mnt/fsx -a bounding_box_3d --split train -c camera_01
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)
    dataset = open_pbobject(scene_dataset_json, pb_class=SceneDataset)
    if (dst_dir is not None):
        # Group per-scene outputs under a directory named after the dataset JSON.
        dataset_directory = os.path.join(dst_dir, os.path.basename(scene_dataset_json).split('.')[0])
        os.makedirs(dataset_directory, exist_ok=True)
        logging.info('Visualizing dataset into {}'.format(dataset_directory))
    else:
        dataset_directory = None
    scene_jsons = dataset.scene_splits[DATASET_SPLIT_NAME_TO_KEY[split]].filenames
    for scene_json in scene_jsons:
        # Scene paths in the split are relative to the dataset JSON's directory.
        scene_json = os.path.join(os.path.dirname(scene_dataset_json), scene_json)
        # `.callback` invokes the sibling command's plain function, bypassing
        # click's CLI parsing layer.
        visualize_scene.callback(scene_json, annotations=annotations, camera_datum_names=camera_datum_names, dataset_class=dataset_class, show_instance_id=show_instance_id, max_num_items=max_num_items, video_fps=video_fps, dst_dir=dataset_directory, verbose=verbose, lidar_datum_names=lidar_datum_names, render_pointcloud=render_pointcloud, radar_datum_names=radar_datum_names, render_radar_pointcloud=render_radar_pointcloud, render_raw=render_raw)
class AddLidarCuboidPointsContext(AddLidarCuboidPoints):
    """Add Lidar Points but applied to samples not datums."""

    def __call__(self, sample: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        # Process only point-cloud datums that actually carry cuboids; every
        # datum (processed or not) is kept in its original order.
        processed = []
        for datum in sample:
            is_lidar = (datum['datum_type'] == 'point_cloud')
            if is_lidar and datum.get('bounding_box_3d') is not None:
                datum = super().__call__(datum)
            processed.append(datum)
        return processed
class ScaleImages(ScaleAffineTransform):
    """Scale Transform but applied to samples not datums."""

    def __call__(self, sample: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        # Apply the parent scale transform to image datums only; all other
        # datum types pass through untouched, preserving order.
        scaled = []
        for datum in sample:
            if datum['datum_type'] == 'image' and 'rgb' in datum:
                datum = super().__call__(datum)
            scaled.append(datum)
        return scaled
@click.group()
@click.version_option()
def cli():
    """Top-level click command group for the DGP-to-Wicker ingestion CLI."""
    # NOTE(review): 'dgp2widker' looks like a typo of 'dgp2wicker' — if the
    # package logger is registered under 'dgp2wicker', this setLevel targets a
    # nonexistent logger; confirm against the package's logger name.
    logging.getLogger('dgp2widker').setLevel(level=logging.INFO)
    # Silence chatty third-party libraries.
    logging.getLogger('py4j').setLevel(level=logging.CRITICAL)
    logging.getLogger('botocore').setLevel(logging.CRITICAL)
    logging.getLogger('boto3').setLevel(logging.CRITICAL)
    logging.getLogger('PIL').setLevel(logging.CRITICAL)
@cli.command(name='ingest')
@click.option('--scene-dataset-json', required=True, help='Path to DGP Dataset JSON')
@click.option('--wicker-dataset-name', required=True, default=None, help='Name of dataset in Wicker')
@click.option('--wicker-dataset-version', required=True, help='Version of dataset in Wicker')
@click.option('--datum-names', required=True, help='List of datum names')
@click.option('--requested-annotations', help='List of annotation types')
@click.option('--only-annotated-datums', is_flag=True, help='Apply only annotated datums')
@click.option('--max-num-scenes', required=False, default=None, type=int, help='The maximum number of scenes to process')
@click.option('--max-len', required=False, default=1000, help='The maximum number of samples per scene')
@click.option('--chunk-size', required=False, default=1000, help='The number of samples per chunk')
@click.option('--skip-camera-cuboids', is_flag=True, help='If True, skip cuboids for non lidar datums')
@click.option('--num-partitions', required=False, default=None, type=int, help='Number of scene partitions')
@click.option('--num-repartitions', required=False, default=None, type=int, help='Number of sample partitions')
@click.option('--is-pd', is_flag=True, help='If true, process the dataset with ParallelDomainScene')
@click.option('--data-uri', required=False, default=None, help='Alternate location for scene data')
@click.option('--add-lidar-points', is_flag=True, help='Add lidar point count to lidar cuboids')
@click.option('--half-size-images', is_flag=True, help='Resize image datums to half size')
@click.option('--alternate-scene-uri', required=False, default=None, help='Alternate scene location to sync')
def ingest(scene_dataset_json, wicker_dataset_name, wicker_dataset_version, datum_names, requested_annotations, only_annotated_datums, max_num_scenes, max_len, chunk_size, skip_camera_cuboids, num_partitions, num_repartitions, is_pd, data_uri, add_lidar_points, half_size_images, alternate_scene_uri):
    """Ingest a DGP scene dataset into Wicker storage.

    Parses comma-separated datum/annotation option strings, assembles the
    optional transform pipeline, and delegates to `ingest_dgp_to_wicker`.

    Fixes vs. previous revision:
    - 'locaiton' typo in --alternate-scene-uri help text corrected.
    - --max-num-scenes/--num-partitions/--num-repartitions now declare
      type=int; with default=None click would otherwise pass them through
      as strings.
    """
    # Comma-separated CLI strings -> lists of names.
    datum_names = [x.strip() for x in datum_names.split(',')]
    requested_annotations = ([x.strip() for x in requested_annotations.split(',')] if requested_annotations else None)
    dataset_kwargs = {'datum_names': datum_names, 'requested_annotations': requested_annotations, 'only_annotated_datums': only_annotated_datums}
    # Optional per-sample transforms applied during ingestion.
    pipeline = []
    if add_lidar_points:
        pipeline.append(AddLidarCuboidPointsContext())
    if half_size_images:
        pipeline.append(ScaleImages(s=0.5))
    results = ingest_dgp_to_wicker(scene_dataset_json=scene_dataset_json, wicker_dataset_name=wicker_dataset_name, wicker_dataset_version=wicker_dataset_version, dataset_kwargs=dataset_kwargs, spark_context=None, pipeline=pipeline, max_num_scenes=max_num_scenes, max_len=max_len, chunk_size=chunk_size, skip_camera_cuboids=skip_camera_cuboids, num_partitions=num_partitions, num_repartitions=num_repartitions, is_pd=is_pd, data_uri=data_uri, alternate_scene_uri=alternate_scene_uri)
    print('Finished ingest!')
    print(results)
def compute_columns(
    datum_names: List[str],
    datum_types: List[str],
    requested_annotations: List[str],
    cuboid_datum: Optional[str] = None,
    with_ontology_table: bool = True
) -> List[str]:
    """Parse requested datums, types, and annotations into keys for fetching from wicker.

    Parameters
    ----------
    datum_names: List
        List of datum names to load.

    datum_types: List
        List of datum types i.e, 'image', 'point_cloud', 'radar_point_cloud'.

    requested_annotations: List
        List of annotation types to load i.e. 'bounding_box_3d', 'depth' etc.

    cuboid_datum: str, default: None
        Optional datum name to restrict loading of bounding_box_3d annotations to a single datum.
        For example if we do not desire to load bounding_box_3d for both the lidar datum and every
        image datum, we would set this field to 'lidar'.

    with_ontology_table: bool, default: True
        Flag to add loading of ontology tables.

    Returns
    -------
    columns_to_load: List
        A list of keys to fetch from wicker.
    """
    columns_to_load = ['scene_index', 'scene_uri', 'sample_index_in_scene']
    # Extra per-type fields on top of the common ones.
    extra_fields_by_type = {
        'image': ['intrinsics', 'rgb', 'distortion'],
        'point_cloud': ['point_cloud', 'extra_channels'],
        'radar_point_cloud': ['point_cloud', 'extra_channels', 'velocity', 'covariance'],
    }
    for datum_name, datum_type in zip(datum_names, datum_types):
        fields = ['timestamp', 'pose', 'extrinsics', 'datum_type']
        fields = fields + extra_fields_by_type.get(datum_type, [])
        if requested_annotations is not None:
            fields = fields + list(requested_annotations)
        for field in fields:
            # Skip combinations that are never stored for this datum type.
            if (datum_type, field) in ILLEGAL_COMBINATIONS:
                continue
            # Optionally restrict cuboid loading to a single datum.
            if field == 'bounding_box_3d' and cuboid_datum is not None and datum_name != cuboid_datum:
                continue
            columns_to_load.append(gen_wicker_key(datum_name, field))
    if with_ontology_table and requested_annotations is not None:
        # Depth has no ontology; everything else registered gets one.
        for annotation in requested_annotations:
            if annotation in ANNOTATION_REGISTRY and annotation != 'depth':
                columns_to_load.append(gen_wicker_key('ontology', annotation))
    return columns_to_load
class DGPS3Dataset(S3Dataset):
    """S3Dataset for data stored in dgp synchronized scene format in wicker.

    This is a baseclass intended for use with all DGP wicker datasets. It handles
    conversion from wicker binary formats to DGP datum and annotation objects.
    """

    def __init__(self, *args: Any, wicker_sample_index: Optional[List[List[int]]] = None, **kwargs: Any) -> None:
        """Initialize the dataset, optionally with an explicit context index.

        Parameters
        ----------
        wicker_sample_index: List[List[int]], default: None
            A mapping from this dataset's index to a list of wicker indexes. If None, a mapping for all
            single frames will be generated.
        """
        super().__init__(*args, **kwargs)
        # Placeholder; always overwritten by one of the two branches below.
        self.wicker_sample_index: List[List[int]] = [[]]
        if (wicker_sample_index is None):
            # Default: one singleton context per underlying wicker row.
            N = super().__len__()
            self.wicker_sample_index = [[k] for k in range(N)]
        else:
            self.wicker_sample_index = wicker_sample_index
        # Populated lazily from the first sample read (see _process_raw_wicker_sample).
        self._ontology_table: Optional[Dict[str, Ontology]] = None

    @property
    def ontology_table(self) -> Optional[Dict[str, Ontology]]:
        """Return the ontology table if any.

        Returns
        -------
        ontology_table: Dict
            The ontology table or None if an ontology table has not been assigned with self._create_ontology_table.
        """
        return self._ontology_table

    def __len__(self) -> int:
        """Number of samples in dataset.

        Returns
        -------
        length: int
            The number of samples (contexts) in the dataset.
        """
        return len(self.wicker_sample_index)

    def _create_ontology_table(self, raw_wicker_sample: Dict[str, Any]) -> Dict[str, Ontology]:
        """Create ontology table based on given wicker item.

        Parameters
        ----------
        raw_wicker_sample: Dict
            A raw wicker sample containing ontology keys, ex: ontology___bounding_box_3d etc.

        Returns
        -------
        ontology_table: Dict
            A dictionary keyed by annotation name holding an ontology for that annotation.
        """
        ontology_table = {}
        # Ontology columns are identified by the 'ontology' datum-name prefix.
        ontology_keys = [key for key in raw_wicker_sample if ('ontology' in key)]
        for key in ontology_keys:
            (_, ontology_type) = parse_wicker_key(key)
            serializer = OntologySerializer(ontology_type)
            ontology_table[ontology_type] = serializer.unserialize(raw_wicker_sample[key])
        return ontology_table

    def _process_raw_wicker_sample(self, raw_wicker_sample: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
        """Parse raw data from wicker into datums/fields.

        Parameters
        ----------
        raw_wicker_sample: Dict
            The raw output from wicker S3Dataset.

        Returns
        -------
        sample_dict: Dict[str, Dict[str,Any]]
            A dictionary keyed by datum name holding DGP SynchronizedScene like datums.
        """
        ontology_table = self._create_ontology_table(raw_wicker_sample)
        if (self.ontology_table is None):
            # First sample seen: cache its ontology table.
            self._ontology_table = ontology_table
        else:
            # Every subsequent sample must agree with the cached ontologies.
            assert (set(ontology_table.keys()) == set(self.ontology_table.keys()))
            for field in self.ontology_table:
                assert (self.ontology_table[field] == ontology_table[field])
        output_dict: Dict[str, Dict[str, Any]] = defaultdict(OrderedDict)
        for (key, raw) in raw_wicker_sample.items():
            # Scene-level bookkeeping columns are collected under a 'meta' datum.
            if (key in ['scene_uri', 'scene_index', 'sample_index_in_scene']):
                output_dict['meta'][key] = raw
                continue
            # Ontology columns were already consumed above.
            if ('ontology' in key):
                continue
            (datum_name, field) = parse_wicker_key(key)
            serializer = FIELD_TO_WICKER_SERIALIZER[field]()
            # Annotation serializers need the matching ontology to deserialize.
            if hasattr(serializer, 'ontology'):
                serializer.ontology = self.ontology_table[field]
            output_dict[datum_name]['datum_name'] = datum_name
            output_dict[datum_name][field] = serializer.unserialize(raw)
        if ('meta' in output_dict):
            output_dict['meta']['datum_name'] = 'meta'
            output_dict['meta']['datum_type'] = 'meta'
        return output_dict

    def __getitem__(self, index: int) -> List[Dict[str, Dict[str, Any]]]:
        """Get the dataset item at index.

        Parameters
        ----------
        index: int
            The index to get.

        Returns
        -------
        context: List
            A context window with samples as dicts keyed by datum name.
        """
        # Resolve the context index into the underlying wicker row indexes.
        wicker_samples = self.wicker_sample_index[index]
        context = []
        for idx in wicker_samples:
            raw = super().__getitem__(idx)
            sample = self._process_raw_wicker_sample(raw)
            context.append(sample)
        return context
def gen_wicker_key(datum_name: str, field: str) -> str:
    """Build the wicker column key for a datum/field pair, i.e 'rgb', 'pose' etc.

    Parameters
    ----------
    datum_name: str
        The name of the datum.

    field: str
        The field of the datum.

    Returns
    -------
    key: str
        The wicker key name formed from datum_name and field.
    """
    # Inverse of parse_wicker_key: join the two parts with the shared separator.
    return WICKER_KEY_SEPARATOR.join((datum_name, field))
def parse_wicker_key(key: str) -> Tuple[str, str]:
    """Split a wicker dataset key into its datum and field parts.

    Parameters
    ----------
    key: str
        The wicker key name formed from datum_name and field.

    Returns
    -------
    datum_name: str
        The name of the datum.

    field: str
        The field of the datum.
    """
    # Inverse of gen_wicker_key: split on the shared separator.
    parts = key.split(WICKER_KEY_SEPARATOR)
    return tuple(parts)
def wicker_types_from_sample(
    sample: List[List[Dict]],
    ontology_table: Optional[Dict] = None,
    skip_camera_cuboids: bool = True
) -> Dict[str, Any]:
    """Get the wicker keys and schema types from an existing dgp sample.

    Parameters
    ----------
    sample: List[List[Dict]]
        SynchronizedSceneDataset-style sample datum.
        NOTE(review): the function iterates `sample` as a flat list of datum
        dicts — the List[List[Dict]] annotation looks one level too deep; confirm.

    ontology_table: Dict, default: None
        A dictionary mapping annotation key(s) to Ontology(s).

    skip_camera_cuboids: bool, default: True
        Flag to skip processing bounding_box_3d for image datums.

    Returns
    -------
    wicker_types: Dict
        The Wicker schema types keyed by wicker column key.
    """
    wicker_types = {}
    for datum in sample:
        name = datum['datum_name']
        dtype = datum['datum_type']
        for field, value in datum.items():
            # Skip bookkeeping and field/type combinations never stored.
            if field == 'datum_name' or (dtype, field) in ILLEGAL_COMBINATIONS:
                continue
            # Optionally drop per-camera cuboids (kept on the lidar datum only).
            if skip_camera_cuboids and dtype == 'image' and field == 'bounding_box_3d':
                continue
            key = gen_wicker_key(name, field)
            wicker_types[key] = FIELD_TO_WICKER_SERIALIZER[field]().schema(key, value)
    if ontology_table is not None:
        for annotation, ontology in ontology_table.items():
            key = gen_wicker_key('ontology', annotation)
            wicker_types[key] = ws.OntologySerializer(annotation).schema(key, ontology)
    # Scene-level bookkeeping columns present in every row.
    wicker_types['scene_index'] = IntField('scene_index')
    wicker_types['sample_index_in_scene'] = IntField('sample_index_in_scene')
    wicker_types['scene_uri'] = StringField('scene_uri')
    return wicker_types
def dgp_to_wicker_sample(sample: List[List[Dict]], wicker_keys: List[str], scene_index: Optional[int], sample_index_in_scene: Optional[int], ontology_table: Optional[Dict], scene_uri: Optional[str]) -> Dict:
    """Serialize a DGP sample into the Wicker row format.

    Parameters
    ----------
    sample: List[List[Dict]]
        SynchronizedSceneDataset-style sample datum.

    wicker_keys: List[str]
        Keys to be used in Wicker; fields whose key is not listed are dropped.

    scene_index: int, default: None
        Index of current scene.

    sample_index_in_scene: int, default: None
        Index of the sample in current scene.

    ontology_table: Dict, default: None
        A dictionary mapping annotation key(s) to Ontology(s).

    scene_uri: str
        Relative path to this specific scene json file.

    Returns
    -------
    wicker_sample: Dict
        DGP sample in the Wicker format.
    """
    converted: Dict = {}

    # Serialize every requested (datum, field) combination.
    for datum in sample:
        name = datum['datum_name']
        for field, value in datum.items():
            key = gen_wicker_key(name, field)
            if key in wicker_keys:
                converted[key] = FIELD_TO_WICKER_SERIALIZER[field]().serialize(value)

    # Serialize the ontologies under the 'ontology' pseudo-datum.
    if ontology_table is not None:
        for ann_key, ontology in ontology_table.items():
            key = gen_wicker_key('ontology', ann_key)
            converted[key] = ws.OntologySerializer(ann_key).serialize(ontology)

    # Row-level bookkeeping columns.
    converted['scene_index'] = scene_index
    converted['sample_index_in_scene'] = sample_index_in_scene
    converted['scene_uri'] = scene_uri
    return converted
def get_scenes(scene_dataset_json: str, data_uri: Optional[str] = None) -> List[Tuple[int, str, str]]:
    """Enumerate all scene files referenced by scene_dataset_json.

    Parameters
    ----------
    scene_dataset_json: str
        Path ot dataset json in s3 or local.

    data_uri: str, default: None
        Optional path to location of raw data. If None, we assume the data is stored alongside scene_dataset_json.

    Returns
    -------
    scenes: List[int,str,str]
        A list of tuples(<index>, <split name>, <path to scene.json>) for each scene in scene_dataset_json.
    """
    if data_uri is None:
        data_uri = os.path.dirname(scene_dataset_json)

    dataset = open_pbobject(scene_dataset_json, SceneDataset)

    # Human-readable names for the protobuf split enum values.
    split_id_to_name = {
        dataset_pb2.TRAIN: 'train',
        dataset_pb2.VAL: 'val',
        dataset_pb2.TEST: 'test',
        dataset_pb2.TRAIN_OVERFIT: 'train_overfit',
    }

    scenes = []
    for split_id, split in dataset.scene_splits.items():
        entries = [(split_id_to_name[split_id], os.path.join(data_uri, fname)) for fname in split.filenames]
        scenes.extend(entries)
        logger.info(f'found {len(entries)} in split {split_id_to_name[split_id]}')
    logger.info(f'found {len(scenes)} in {scene_dataset_json}')

    # Attach a stable global index to every (split, path) pair.
    return [(idx, split_name, path) for idx, (split_name, path) in enumerate(scenes)]
def chunk_scenes(scenes: List[Tuple[int, str, str]], max_len: int = 200, chunk_size: int = 100) -> List[Tuple[int, str, str, Tuple[int, int]]]:
    """Split each scene into chunks of at most chunk_size samples.

    Each scene tuple is replicated once per chunk window. The last window's
    end index may exceed the real scene length; consumers stop early when
    they run past the end of the scene (see process_scene in the ingest path).

    Parameters
    ----------
    scenes: List[str,str]
        List of scene split/path tuples.

    max_len: int, default: 200
        Expected maximum length of each scene.

    chunk_size: int, default: 100
        Maximum size of each chunk.

    Returns
    -------
    scenes: List[Tuple[int,str,str,Tuple[int,int]]]
        A list of scenes with (<index>, <split>, <path>, (<sample index start>, <sample index end>)) tuples.
    """
    # Ceil division: the previous floor division (max_len // chunk_size)
    # silently dropped the trailing partial chunk, so the final
    # `max_len % chunk_size` samples of each scene were never emitted when
    # max_len was not an exact multiple of chunk_size. Identical output for
    # all divisible configurations (including both defaults).
    num_chunks = (max_len + chunk_size - 1) // chunk_size
    new_scenes = []
    for c in range(num_chunks):
        chunk = (c * chunk_size, (c + 1) * chunk_size)
        new_scenes.extend([(*x, chunk) for x in scenes])
    return new_scenes
def local_spark() -> pyspark.SparkContext:
    """Generate a spark context for local testing of small datasets.

    Returns
    -------
    spark_context: A spark context
    """
    # Same settings as the original fluent-chain form, kept in a dict so the
    # configuration is easy to scan and extend.
    settings = {
        'spark.driver.memory': '56G',
        'spark.executor.memory': '56G',
        'spark.dynamicAllocation.enabled': 'true',
        'spark.dynamicAllocation.maxExecutors': '4',
        'spark.dynamicAllocation.minExecutors': '1',
        'spark.executor.cores': '1',
        'spark.task.maxFailures': '4',
        'spark.driver.maxResultSize': '4G',
        'spark.python.worker.memory': '24G',
    }
    builder = pyspark.sql.SparkSession.builder.master('local[*]').appName('dgp2wicker')
    for option, value in settings.items():
        builder = builder.config(option, value)
    return builder.getOrCreate().sparkContext
def ingest_dgp_to_wicker(
    scene_dataset_json: str,
    wicker_dataset_name: str,
    wicker_dataset_version: str,
    dataset_kwargs: Dict,
    spark_context: pyspark.SparkContext,
    pipeline: Optional[List[Callable]] = None,
    max_num_scenes: Optional[int] = None,
    max_len: int = 1000,
    chunk_size: int = 1000,
    skip_camera_cuboids: bool = True,
    num_partitions: Optional[int] = None,
    num_repartitions: Optional[int] = None,
    is_pd: bool = False,
    data_uri: Optional[str] = None,
    alternate_scene_uri: Optional[str] = None,
) -> Dict[str, int]:
    """Ingest DGP dataset into Wicker datastore.

    Parameters
    ----------
    scene_dataset_json: str
        Path to scenes dataset json.

    wicker_dataset_name: str
        Name of the dataset used in Wicker datastore.

    wicker_dataset_version: str
        Semantic version of the dataset (i.e., xxx.xxx.xxx).

    dataset_kwargs: dict
        Arguments for dataloader.

    spark_context: pyspark.SparkContext, default: None
        A spark context. If None, will generate one using dgp2wicker.ingest.local_spark() and default settings.

    pipeline: List[Callable], default: None
        A list of transformations to apply to every sample.

    max_num_scenes: int, default: None
        An optional upper bound on the number of scenes to process. Typically used for testing.

    max_len: int, default: 1000
        Maximum expected length of a scene.

    chunk_size: int, default: 1000
        Chunk size to split scenes into. If less than max_len, the same scene will be downloaded
        multiple times.

    skip_camera_cuboids: bool, default: True
        Optional flag to skip converting 'bounding_box_3d' for image_datum types.

    num_partitions: int, default: None
        Number of partitions to map scenes over. If None, defaults to number of scenes.

    num_repartitions: int, default: None
        Number of partitions to shuffle all samples over. If None, defaults to num_scenes*5.

    is_pd: bool, default: False
        Flag to indicate if the dataset to load is a Parallel Domain dataset. If true, the scenes
        will be loaded with ParallelDomainScene with use_virtual_cameras set to False.

    data_uri: str, default: None
        Optional path to raw data location if raw data is not stored alongside scene_dataset_json.

    alternate_scene_uri: str, default: None
        If provided, download additional scene data from an alternate location. This happens before the
        scene containing scene_json_uri is downloaded and everything in scene_json_uri's location will
        overwrite this. This also expects that the scenes are structured as /<scene_dir>/scene.json
        and so any addtional data for this scene should be in alternate_scene_uri/<scene_dir>.
        This is useful if for some reason a scene json and an additional annotation are in a different location
        than the rest of the scene data.

    Returns
    -------
    Dict[str, int]
        Result of ``wsp.persist_wicker_dataset`` — presumably a mapping from
        split name to number of persisted samples (see TestDDGP2Wicker.test_ingest);
        confirm against the wicker spark plugin documentation.
    """

    # NOTE: both nested functions below close over `alternate_scene_uri` from
    # the outer scope; `process_scene` is shipped to Spark executors via
    # `mapPartitions`, so everything it captures must be picklable.
    def open_scene(
        scene_json_uri: str,
        temp_dir: str,
        dataset_kwargs: Dict[str, Any],
        alternate_scene_uri: Optional[str] = None,
    ) -> Union[SynchronizedScene, ParallelDomainScene]:
        """Utility function to download a scene and open it.

        Parameters
        ----------
        scene_json_uri: str
            Path to scene json.

        temp_dir: str
            Path to directory to store scene if downloaded from s3. Not used if scene_json is local.

        dataset_kwargs: dict
            Arguments for data loader, i.e. datum_names, requested annotations etc. If this is a PD scene
            the dataset_kwargs should contain an `is_pd` key set to True.

        alternate_scene_uri: str, default: None
            Optional additional location to sync.

        Returns
        -------
        dataset: A DGP dataset
        """
        scene_dir_uri = os.path.dirname(scene_json_uri)
        scene_json = os.path.basename(scene_json_uri)
        if scene_dir_uri.startswith('s3://'):
            # Remote scene: sync everything into the caller-provided temp dir.
            local_path = temp_dir
            assert not temp_dir.startswith('s3'), f'{temp_dir}'
            if alternate_scene_uri is not None:
                # Alternate data is synced FIRST so the primary scene location
                # overwrites any overlapping files.
                alternate_scene_dir = os.path.join(alternate_scene_uri, os.path.basename(scene_dir_uri))
                logger.info(f'downloading additional scene data from {alternate_scene_dir} to {local_path}')
                sync_dir(alternate_scene_dir, local_path)
            logger.info(f'downloading scene from {scene_dir_uri} to {local_path}')
            sync_dir(scene_dir_uri, local_path)
        else:
            # Local scene: read in place, no copying.
            local_path = scene_dir_uri
            logger.info(f'Using local scene from {scene_dir_uri}')

        # Copy before mutating: the same kwargs dict is reused for every scene.
        dataset_kwargs = deepcopy(dataset_kwargs)
        dataset_kwargs['scene_json'] = os.path.join(local_path, scene_json)
        # `is_pd` is popped because neither scene class accepts it as a kwarg.
        is_pd = dataset_kwargs.pop('is_pd')
        if is_pd:
            dataset_kwargs['use_virtual_camera_datums'] = False
            dataset = ParallelDomainScene(**dataset_kwargs)
        else:
            dataset = SynchronizedScene(**dataset_kwargs)
        return dataset

    def process_scene(
        partition: List[Tuple[int, str, str, Tuple[int, int]]],
        dataset_kwargs: Dict,
        pipeline: List[Callable],
        wicker_types: Dict[str, Any],
    ) -> Generator[Tuple[str, Any], None, None]:
        """Main task to parallelize. This takes a list of scene chunks and sequentially
        downloads the scene to a temporary directory, opens the scene, applies any transformations,
        and yields wicker serialized samples.

        Parameters
        ----------
        partition: tuple
            A list of scenes to process with this spark partition.
            Each entry should be a tuple with <index in dataset, split, scene_uri, (chunk start, chunk end)>.

        dataset_kwargs: Dict
            Arguments for data loader. See open_scene for details.

        pipeline: List[Callable]
            List of transformations to apply to samples.

        wicker_types: Dict[str, Any]
            The wicker schema mapping; only its keys are used here.

        Returns
        -------
        wicker_sample: (split, sample)
            Yields a wicker converted sample.
        """
        for scene_info in partition:
            yield_count = 0
            global_scene_index, split, scene_json_uri, chunk = scene_info
            chunk_start, chunk_end = chunk
            scene_dir_uri = os.path.dirname(scene_json_uri)
            scene_json = os.path.basename(scene_json_uri)
            st = time.time()
            with tempfile.TemporaryDirectory() as temp_dir:
                try:
                    dataset = open_scene(scene_json_uri, temp_dir, dataset_kwargs, alternate_scene_uri=alternate_scene_uri)
                except Exception as e:
                    # Best-effort ingestion: a broken scene is logged and skipped,
                    # not allowed to kill the whole Spark task.
                    logger.error(f'Failed to open {scene_json_uri}, skipping...')
                    logger.error(e)
                    traceback.print_exc()
                    continue
                ontology_table = dataset.dataset_metadata.ontology_table
                for i in range(chunk_start, chunk_end):
                    # The chunk window may extend past the real scene length.
                    if i >= len(dataset):
                        break
                    try:
                        _, sample_index_in_scene, _ = dataset.dataset_item_index[i]
                        sample = dataset[i][0]
                        for transform in pipeline:
                            sample = transform(sample)
                        wicker_sample = dgp_to_wicker_sample(
                            sample,
                            wicker_keys=wicker_types.keys(),
                            scene_index=int(global_scene_index),
                            sample_index_in_scene=int(sample_index_in_scene),
                            ontology_table=ontology_table,
                            scene_uri=os.path.join(os.path.basename(scene_dir_uri), scene_json),
                        )
                        assert wicker_sample is not None
                        for k, v in wicker_sample.items():
                            assert v is not None, f'{k} has invalid wicker serialized item'
                        yield_count += 1
                        # deepcopy so the yielded row cannot alias buffers that the
                        # next iteration (or scene teardown) mutates.
                        yield (split, deepcopy(wicker_sample))
                    except Exception as e:
                        # Same best-effort policy at sample granularity.
                        logger.error('failed to get sample, skipping...')
                        logger.error(e)
                        traceback.print_exc()
                        continue
            dt = time.time() - st
            logger.info(f'Finished {global_scene_index} {split}/{scene_dir_uri}, chunk:{chunk_start}->{chunk_end}. Yielded {yield_count}, took {dt:.2f} seconds')

    # Stash the PD flag inside the kwargs so open_scene (which only receives
    # dataset_kwargs) can pick the right scene class.
    dataset_kwargs['is_pd'] = is_pd
    if pipeline is None:
        pipeline = []

    scenes = get_scenes(scene_dataset_json, data_uri=data_uri)
    if max_num_scenes is not None:
        scenes = scenes[:int(max_num_scenes)]
    if num_partitions is None:
        num_partitions = len(scenes)
    if num_repartitions is None:
        num_repartitions = 5 * len(scenes)

    # Open one sample from the first scene to derive the Wicker schema.
    _, _, scene_json_uri = scenes[0]
    with tempfile.TemporaryDirectory() as temp_dir:
        dataset = open_scene(scene_json_uri, temp_dir, dataset_kwargs, alternate_scene_uri=alternate_scene_uri)
        logger.info(f'Got dataset with {len(dataset)} samples')
        sample = dataset[0][0]
        # The schema is derived from the TRANSFORMED sample, so it matches what
        # process_scene will actually emit.
        for transform in pipeline:
            sample = transform(sample)
        ontology_table = dataset.dataset_metadata.ontology_table
        wicker_types = wicker_types_from_sample(sample=sample, ontology_table=ontology_table, skip_camera_cuboids=skip_camera_cuboids)

    wicker_dataset_schema = wicker.schema.DatasetSchema(primary_keys=['scene_index', 'sample_index_in_scene'], fields=list(wicker_types.values()))

    # Expand scenes into (scene, chunk-window) work items and shuffle them so
    # large scenes are spread across partitions.
    scenes = chunk_scenes(scenes, max_len=max_len, chunk_size=chunk_size)
    scene_shuffle_idx = np.random.permutation(len(scenes)).tolist()
    scenes = [scenes[i] for i in scene_shuffle_idx]

    # NOTE(review): with fewer than 2 work items the plugin's default partition
    # size is shrunk — presumably to avoid empty output partitions; confirm
    # against the wicker spark plugin.
    if len(scenes) < 2:
        wsp.SPARK_PARTITION_SIZE = 3
    if spark_context is None:
        spark_context = local_spark()

    process = partial(process_scene, dataset_kwargs=dataset_kwargs, pipeline=pipeline, wicker_types=wicker_types)
    rdd = spark_context.parallelize(scenes, numSlices=num_partitions).mapPartitions(process).repartition(num_repartitions)
    return wsp.persist_wicker_dataset(wicker_dataset_name, wicker_dataset_version, wicker_dataset_schema, rdd)
class CustomInstallCommand(install):
    """Custom install command."""

    def run(self):
        # Delegate to the standard setuptools install behaviour; `super()`
        # resolves to `install` here, identical to `install.run(self)`.
        super().run()
class CustomDevelopCommand(develop):
    """Custom develop command."""

    def run(self):
        # Delegate to the standard setuptools develop behaviour; `super()`
        # resolves to `develop` here, identical to `develop.run(self)`.
        super().run()
def s3_is_configured():
    """Utility to check if there is a valid s3 dataset path configured.

    Reads the wicker config json (path taken from WICKER_CONFIG_PATH, falling
    back to ~/wickerconfig.json) and reports whether the configured dataset
    path points at s3.
    """
    config_file = os.getenv('WICKER_CONFIG_PATH', os.path.expanduser('~/wickerconfig.json'))
    with open(config_file, 'r', encoding='utf-8') as fp:
        config = json.load(fp)
    return config['aws_s3_config']['s3_datasets_path'].startswith('s3://')
class TestDDGP2Wicker(unittest.TestCase):
    """End-to-end tests for the DGP -> Wicker conversion utilities."""

    def setUp(self):
        """Create a local dgp dataset for testing."""
        # Small local fixture dataset with one camera and one lidar datum.
        self.dataset = SynchronizedSceneDataset(
            TEST_WICKER_DATASET_JSON,
            split='train',
            datum_names=['LIDAR', 'CAMERA_01'],
            forward_context=0,
            backward_context=0,
            requested_annotations=('bounding_box_2d', 'bounding_box_3d'),
        )

    def test_keys(self):
        """Sanity check the key parsing."""
        datum_key, datum_field = 'CAMERA_01', 'timestamp'
        key = gen_wicker_key(datum_key, datum_field)
        # parse_wicker_key must be the exact inverse of gen_wicker_key.
        datum_key2, datum_field2 = parse_wicker_key(key)
        assert datum_key == datum_key2
        assert datum_field == datum_field2

    def test_schema(self):
        """Sanity check the schema generation."""
        sample = self.dataset[0][0]
        ontology_table = self.dataset.dataset_metadata.ontology_table
        wicker_types = wicker_types_from_sample(sample, ontology_table, skip_camera_cuboids=True)
        # Note: no 'CAMERA_01____bounding_box_3d' below because skip_camera_cuboids=True.
        expected_keys = [
            'CAMERA_01____timestamp', 'CAMERA_01____rgb', 'CAMERA_01____intrinsics', 'CAMERA_01____distortion',
            'CAMERA_01____extrinsics', 'CAMERA_01____pose', 'CAMERA_01____bounding_box_2d', 'CAMERA_01____datum_type',
            'LIDAR____timestamp', 'LIDAR____extrinsics', 'LIDAR____pose', 'LIDAR____point_cloud',
            'LIDAR____extra_channels', 'LIDAR____bounding_box_3d', 'LIDAR____datum_type',
            'ontology____bounding_box_2d', 'ontology____bounding_box_3d',
            'scene_index', 'sample_index_in_scene', 'scene_uri'
        ]
        assert set(expected_keys) == set(wicker_types.keys())

    def test_conversion(self):
        """Test serializers and conversion to wicker formats."""
        sample = self.dataset[0][0]
        ontology_table = self.dataset.dataset_metadata.ontology_table
        wicker_types = wicker_types_from_sample(sample, ontology_table, skip_camera_cuboids=True)
        sample_dict = {datum['datum_name']: datum for datum in sample}
        wicker_sample = dgp_to_wicker_sample(
            sample=sample,
            wicker_keys=list(wicker_types.keys()),
            scene_index=0,
            sample_index_in_scene=0,
            ontology_table=ontology_table,
            scene_uri='scene/scene.json',
        )
        assert set(wicker_sample.keys()) == set(wicker_types.keys())
        # Round-trip every serialized field and compare against the original sample.
        for key, raw in wicker_sample.items():
            if key in ('scene_index', 'sample_index_in_scene', 'scene_uri'):
                continue
            datum_key, datum_field = parse_wicker_key(key)
            if datum_key == 'ontology':
                serializer = ws.OntologySerializer(datum_field)
            elif datum_field in FIELD_TO_WICKER_SERIALIZER:
                serializer = FIELD_TO_WICKER_SERIALIZER[datum_field]()
            else:
                print(f'{key} not supported')
                continue
            # Annotation serializers need the ontology to decode.
            if hasattr(serializer, 'ontology'):
                serializer.ontology = ontology_table[datum_field]
            unserialized = serializer.unserialize(raw)
            if datum_field == 'rgb':
                # Image round-trip is lossy (compression); require PSNR > 40 dB
                # rather than exact pixel equality.
                assert isinstance(unserialized, Image)
                org_im = np.array(sample_dict[datum_key]['rgb'])
                new_im = np.array(unserialized)
                psnr = cv2.PSNR(org_im, new_im)
                assert psnr > 40
            elif datum_field in ('point_cloud', 'extra_channels', 'intrinsics'):
                org = sample_dict[datum_key][datum_field]
                assert np.allclose(org, unserialized)
            elif datum_field in ('pose', 'extrinsics'):
                org = sample_dict[datum_key][datum_field]
                assert np.allclose(org.matrix, unserialized.matrix)
            elif datum_key != 'ontology':
                org = sample_dict[datum_key][datum_field]
                assert org == unserialized

    @unittest.skipUnless(s3_is_configured(), 'Requires S3')
    def test_ingest(self):
        """Test ingestion."""
        wsp.SPARK_PARTITION_SIZE = 12
        output = ingest_dgp_to_wicker(
            scene_dataset_json=TEST_WICKER_DATASET_JSON,
            wicker_dataset_name=TEST_WICKER_DATASET_NAME,
            wicker_dataset_version=TEST_WICKER_DATASET_VERSION,
            dataset_kwargs=TEST_WICKER_DATASET_KWARGS,
            spark_context=None,
            pipeline=None,
            max_num_scenes=None,
            max_len=1000,
            chunk_size=1000,
            skip_camera_cuboids=True,
            num_partitions=None,
            num_repartitions=None,
            is_pd=False,
            data_uri=None,
        )
        # The fixture dataset has 6 samples in each split.
        assert output['train'] == 6
        assert output['val'] == 6

    @unittest.skipUnless(s3_is_configured(), 'Requires S3')
    def test_ingest_cli(self):
        """Test ingestion via the cli."""
        wsp.SPARK_PARTITION_SIZE = 12
        # Click's CliRunner shlex-splits this string, so the embedded
        # whitespace/newlines simply separate arguments.
        cmd = f'''--scene-dataset-json {TEST_WICKER_DATASET_JSON} '''
        cmd += f'''--wicker-dataset-name {TEST_WICKER_DATASET_NAME} '''
        cmd += f'''--wicker-dataset-version {TEST_WICKER_DATASET_VERSION} '''
        cmd += '--datum-names CAMERA_01,LIDAR\n'
        cmd += '--requested-annotations bounding_box_2d,bounding_box_3d\n'
        cmd += '--only-annotated-datums\n'
        cmd += '--half-size-images\n'
        cmd += '--add-lidar-points'
        runner = CliRunner()
        result = runner.invoke(ingest, cmd)
        assert result.exit_code == 0

    @unittest.skipUnless(s3_is_configured(), 'Requires S3')
    def test_dataset(self):
        """Test that we can read a dataset from wicker."""
        # Ingest first so there is something to read back.
        self.test_ingest()
        columns = compute_columns(
            datum_names=['CAMERA_01', 'LIDAR'],
            datum_types=['image', 'point_cloud'],
            requested_annotations=['bounding_box_2d', 'bounding_box_3d'],
            cuboid_datum='LIDAR',
            with_ontology_table=True,
        )
        dataset = DGPS3Dataset(
            dataset_name=TEST_WICKER_DATASET_NAME,
            dataset_version=TEST_WICKER_DATASET_VERSION,
            dataset_partition_name='train',
            columns_to_load=columns,
        )
        sample = dataset[0][0]
        expected_camera_fields = {
            'extrinsics', 'bounding_box_2d', 'pose', 'datum_name', 'datum_type', 'distortion',
            'intrinsics', 'rgb', 'timestamp'
        }
        expected_lidar_fields = {
            'pose', 'datum_name', 'datum_type', 'extra_channels', 'point_cloud', 'bounding_box_3d',
            'extrinsics', 'timestamp'
        }
        assert set(sample['CAMERA_01'].keys()) == expected_camera_fields
        assert isinstance(sample['CAMERA_01']['rgb'], Image)
        assert set(sample['LIDAR'].keys()) == expected_lidar_fields
        assert set(dataset.ontology_table.keys()) == {'bounding_box_2d', 'bounding_box_3d'}
class BaseAgentDataset():
    """A base class representing a Agent Dataset. Provides utilities for parsing and slicing
    DGP format agent datasets.

    Parameters
    ----------
    Agent_dataset_metadata: DatasetMetadata
        Dataset metadata object that encapsulates dataset-level agents metadata for
        both operating modes (scene or JSON).

    agent_groups: list[AgentContainer]
        List of AgentContainer objects to be included in the dataset.

    split: str, default: None
        Split of dataset to read ("train" | "val" | "test" | "train_overfit").
        If the split is None, the split type is not known and the dataset can
        be used for unsupervised / self-supervised learning.
    """
    def __init__(self, Agent_dataset_metadata, agent_groups, split=None):
        logging.info(f'Instantiating dataset with {len(agent_groups)} scenes.')
        self.Agent_dataset_metadata = Agent_dataset_metadata
        self.split = split
        self.agent_groups = agent_groups
        # Built by the subclass via _build_item_index (abstract below).
        self.dataset_item_index = self._build_item_index()

    @staticmethod
    def _load_agents_data(agent_group, ontology_table, feature_ontology_table):
        """Call loading method from agent_group to load agent slice data and agent track data.

        Parameters
        ----------
        agent_group: AgentContainer
            Group of agents from a scene.

        ontology_table: dict[str->dgp.annotations.Ontology]
            A dictionary mapping annotation type key(s) to Ontology(s).

        feature_ontology_table: dict, default: None
            A dictionary mapping feature type key(s) to Ontology(s).

        Returns
        -------
        agent_group: AgentContainer
            An AgentContainer objects with agents loaded.
        """
        # Mutates agent_group in place and returns it for convenience.
        agent_group.load_agent_data(ontology_table, feature_ontology_table)
        return agent_group

    @staticmethod
    def _extract_agent_groups_from_agent_dataset_json(agent_dataset_json, requested_agent_type, split='train', use_diskcache=True):
        """Extract agent container objects from the agent dataset JSON
        for the appropriate split.

        Parameters
        ----------
        agent_dataset_json: str
            Path of the dataset.json

        requested_agent_type: str
            Agent type to load (a key of AGENT_REGISTRY, e.g. 'agent_3d').

        split: str, default: 'train'
            Split of dataset to read ("train" | "val" | "test" | "train_overfit").

        use_diskcache: bool, default: True
            If True, cache ScenePb2 object using diskcache. If False, save the object in memory.
            NOTE: Setting use_diskcache to False would exhaust the memory if have a large number of scenes in this
            scene dataset.

        Returns
        -------
        scene_containers: list
            List of SceneContainer objects.
        """
        assert split in ('train', 'val', 'test', 'train_overfit')
        split_enum = {
            'train': dataset_pb2.TRAIN,
            'val': dataset_pb2.VAL,
            'test': dataset_pb2.TEST,
            'train_overfit': dataset_pb2.TRAIN_OVERFIT
        }[split]
        # Only local paths can be existence-checked; s3 URIs are taken on faith.
        if not agent_dataset_json.startswith('s3://'):
            assert os.path.exists(agent_dataset_json), 'Path {} does not exist'.format(agent_dataset_json)
        logging.info('Loading dataset from {}, split={}'.format(agent_dataset_json, split))
        agent_dataset_root = os.path.dirname(agent_dataset_json)
        agent_dataset = open_pbobject(agent_dataset_json, AgentsPb2)
        logging.info('Generating agents for split={}'.format(split))
        st = time.time()
        # Filenames in the proto are relative to the dataset json's directory.
        agent_jsons = [os.path.join(agent_dataset_root, _f) for _f in list(agent_dataset.agents_splits[split_enum].filenames)]
        # NOTE(review): this base class references the subclass name
        # AgentDataset here even though _get_agent_container is defined on
        # BaseAgentDataset — works via inheritance, but couples base to subclass.
        with Pool(cpu_count()) as proc:
            agent_containers = list(
                proc.map(
                    partial(AgentDataset._get_agent_container, requested_agent_type=requested_agent_type, use_diskcache=use_diskcache),
                    agent_jsons
                )
            )
        logging.info('Scene generation completed in {:.2f}s'.format(time.time() - st))
        return agent_containers

    @staticmethod
    def _get_agent_container(agent_json, requested_agent_type, use_diskcache=True):
        """Construct a single AgentContainer from an agent scene json.

        Parameters
        ----------
        agent_json: str
            Path of the agent_scene.json

        requested_agent_type: str
            Agent type to load (a key of AGENT_REGISTRY, e.g. 'agent_3d').

        use_diskcache: bool, default: True
            If True, cache ScenePb2 object using diskcache. If False, save the object in memory.
            NOTE: Setting use_diskcache to False would exhaust the memory if have a large number of scenes in this
            scene dataset.

        Returns
        -------
        agent_container: AgentContainer
            Container holding the agents of this scene (lazily loaded).
        """
        agent_dir = os.path.dirname(agent_json)
        logging.debug(f'Loading agents from {agent_json}')
        agent_container = AgentContainer(agent_json, requested_agent_type, directory=agent_dir, use_diskcache=use_diskcache)
        return agent_container

    def _build_item_index(self):
        """Builds an index of dataset items that refer to the scene index,
        sample index and selected datum names. __getitem__ indexes into this look up table.

        Returns
        -------
        item_index: list
            List of dataset items that contain index into
            (scene_idx, sample_idx_in_scene, (datum_name_1, datum_name_2, ...)).
        """
        raise NotImplementedError

    def __len__(self):
        """Return the length of the dataset."""
        raise NotImplementedError

    def __getitem__(self, index):
        """Get the dataset item at index."""
        raise NotImplementedError

    def __hash__(self):
        """Hashes the dataset instance that is consistent across Python instances."""
        logging.debug('Hashing dataset with dataset directory, split and datum-index')
        # sha1 over directory + item index + split gives a hash that is stable
        # across interpreter runs (unlike the default id-based hash).
        return int(
            hashlib.sha1(
                (self.Agent_dataset_metadata.directory.encode() + str(self.dataset_item_index).encode()) + str(self.split).encode()
            ).hexdigest(), 16
        )
class AgentContainer():
    """Object-oriented container for holding agent information from a scene."""
    # Random 5-digit suffix so concurrent processes get distinct cache dirs
    # unless DGP_SCENE_CACHE_SUFFIX pins one explicitly.
    RANDOM_STR = ''.join([str(random.randint(0, 9)) for _ in range(5)])
    cache_suffix = os.environ.get('DGP_SCENE_CACHE_SUFFIX', RANDOM_STR)
    cache_dir = os.path.join(DGP_CACHE_DIR, f'dgp_diskcache_{cache_suffix}')
    # Class-level disk cache shared by all AgentContainer instances.
    AGENT_GROUP_CACHE = Cache(cache_dir)

    def __init__(self, agent_file_path, requested_agent_type, directory=None, use_diskcache=True):
        """Initialize a scene with a agent group object and optionally provide the
        directory containing the agent.json to gather additional information
        for directory-based dataset loading mode.

        Parameters
        ----------
        agent_file_path: str
            Path to the agent object containing agent tracks and agent slices.

        requested_agent_type: str
            Agent type to load (a key of AGENT_REGISTRY, e.g. 'agent_3d').

        directory: str, default: None
            Directory containing scene_<sha1>.json.

        use_diskcache: bool, default: True
            If True, cache AgentGroupPb2 object using diskcache. If False, save the object in memory.
            NOTE: Setting use_diskcache to False would exhaust the memory if have a large number of scenes.
        """
        self.agent_file_path = agent_file_path
        self.directory = directory
        self.use_diskcache = use_diskcache
        self.requested_agent_type = requested_agent_type
        # In-memory protobuf cache, only used when use_diskcache is False.
        self._agent_group = None
        # Populated lazily by load_agent_data():
        #   sample id -> AgentSnapshotList (per-frame slices)
        self.sample_id_to_agent_snapshots = {}
        #   instance id -> AgentSnapshotList (per-agent tracks)
        self.instance_id_to_agent_snapshots = {}
        logging.debug(f'Loading agent-based dataset from {self.directory}')

    @property
    def agent_group(self):
        """Returns agent group.
        - If self.use_diskcache is True: returns the cached `_agent_group` if available, otherwise load the
          agent group and cache it.
        - If self.use_diskcache is False: returns `_agent_group` in memory if the instance has attribute
          `_agent_group`, otherwise load the agent group and save it in memory.
        NOTE: Setting use_diskcache to False would exhaust the memory if have a large number of agent groups.
        """
        if self.use_diskcache:
            if self.agent_file_path in AgentContainer.AGENT_GROUP_CACHE:
                _agent_group = AgentContainer.AGENT_GROUP_CACHE.get(self.agent_file_path)
                # get() can still return None (e.g. entry evicted between the
                # membership check and the read), so fall through to a reload.
                if _agent_group is not None:
                    return _agent_group
            _agent_group = open_pbobject(self.agent_file_path, AgentGroupPb2)
            AgentContainer.AGENT_GROUP_CACHE.add(self.agent_file_path, _agent_group)
            return _agent_group
        else:
            if self._agent_group is None:
                self._agent_group = open_pbobject(self.agent_file_path, AgentGroupPb2)
            return self._agent_group

    def __repr__(self):
        return 'AgentContainer[{}][agents: {}]'.format(self.directory, len(self.instance_id_to_agent_snapshots))

    @property
    def ontology_files(self):
        """Returns the ontology files for the agent group.

        Returns
        -------
        ontology_files: dict
            Maps annotation_key -> filename.

            For example:
            filename = agent.ontology_files['bounding_box_2d']
        """
        ontology_files = {
            ANNOTATION_TYPE_ID_TO_KEY[ann_id]: os.path.join(self.directory, ONTOLOGY_FOLDER, '{}.json'.format(f))
            for ann_id, f in self.agent_group.agent_ontologies.items()
        }
        return ontology_files

    @property
    def feature_ontology_files(self):
        """Returns the feature ontology files for a agent group.

        Returns
        -------
        ontology_files: dict
            Maps annotation_key -> filename.

            For example:
            filename = agent.feature_ontology_files['agent_3d']
        """
        feature_ontology_files = {
            FEATURE_TYPE_ID_TO_KEY[feature_id]: os.path.join(self.directory, FEATURE_ONTOLOGY_FOLDER, '{}.json'.format(f))
            for feature_id, f in self.agent_group.feature_ontologies.items()
        }
        return feature_ontology_files

    @property
    @lru_cache(maxsize=None)
    def metadata_index(self):
        """Helper for building metadata index.

        NOTE(review): `lru_cache` on an instance property keys on `self` and
        keeps every instance alive for the cache's lifetime (flake8-bugbear
        B019); `functools.cached_property` would avoid that — confirm before
        changing.

        TODO: Need to verify that the hashes are unique, and these lru-cached
        properties are consistent across disk-cached reads.
        """
        logging.debug(f'Building metadata index for agent group {self.agent_file_path}')
        agent_group = self.agent_group
        return {
            'log_id': agent_group.log,
            'agent_group_name': agent_group.name,
            'agent_group_description': agent_group.description
        }

    def agent_slice(self, sample_id):
        """Return AgentSnapshotList in a frame."""
        return self.sample_id_to_agent_snapshots[sample_id]

    def agent_track(self, instance_id):
        """Return AgentSnapshotList in a track."""
        return self.instance_id_to_agent_snapshots[instance_id]

    def load_agent_data(self, ontology_table, feature_ontology_table):
        """Load agent slice data and agent track data.

        Populates sample_id_to_agent_snapshots and instance_id_to_agent_snapshots
        in place; returns [] early when the group has no agent files.

        Parameters
        ----------
        ontology_table: dict[str->dgp.annotations.Ontology]
            Ontology object *per annotation type*.
            The original ontology table.
            {
                "bounding_box_2d": BoundingBoxOntology[<ontology_sha>],
                "autolabel_model_1/bounding_box_2d": BoundingBoxOntology[<ontology_sha>],
                "semantic_segmentation_2d": SemanticSegmentationOntology[<ontology_sha>],
                "bounding_box_3d": BoundingBoxOntology[<ontology_sha>],
            }

        feature_ontology_table: dict[str->dgp.features.FeatureOntology]
            Ontology object *per feature type*.
            The original feature ontology table.
            {
                "agent_2d": AgentFeatureOntology,
                "agent_3d": AgentFeatureOntology,
                "ego_intention": AgentFeatureOntology
            }
        """
        agent_slices_path = self.agent_group.agents_slices_file
        agent_tracks_path = self.agent_group.agent_tracks_file
        if agent_slices_path is None or agent_tracks_path is None:
            logging.debug('Skipping agent_group {} due to missing agents'.format(self.agent_group))
            return []
        agents_slices_file = os.path.join(self.directory, agent_slices_path)
        agent_tracks_file = os.path.join(self.directory, agent_tracks_path)
        agents_slices_pb2 = open_pbobject(agents_slices_file, AgentsSlicesPb2)
        agent_tracks_pb2 = open_pbobject(agent_tracks_file, AgentTracksPb2)
        # Single ontology for the one requested agent type.
        agent_ontology = ontology_table[AGENT_TYPE_TO_ANNOTATION_TYPE[self.requested_agent_type]]
        # Per-frame slices, keyed by the sample index within the scene.
        for agent_slice in agents_slices_pb2.agents_slices:
            agents_list = AGENT_REGISTRY[self.requested_agent_type].load(
                agent_slice.agent_snapshots, agent_ontology, feature_ontology_table
            )
            self.sample_id_to_agent_snapshots[agent_slice.slice_id.index] = agents_list
        # Per-agent tracks, keyed by instance id.
        for track in agent_tracks_pb2.agent_tracks:
            self.instance_id_to_agent_snapshots[track.instance_id] = AGENT_REGISTRY[self.requested_agent_type].load(
                track.agent_snapshots, agent_ontology, feature_ontology_table
            )
class AgentDataset(BaseAgentDataset):
    """Dataset for agent-centric prediction or planning use cases. Works just like a
    normal SynchronizedSceneDataset, but guarantees the trajectory of the main agent
    is present in any fetched sample.

    Parameters
    ----------
    scene_dataset_json: str
        Full path to the scene dataset json holding collections of paths to scene json.

    agents_dataset_json: str
        Full path to the agent dataset json holding collections of paths to scene json.

    split: str, default: 'train'
        Split of dataset to read ("train" | "val" | "test" | "train_overfit").

    datum_names: list, default: None
        Select list of datum names for synchronization (see self.select_datums(datum_names)).

    requested_agent_type: str, default: 'agent_3d'
        Agent type, i.e. 'agent_2d' or 'agent_3d'. Only one type of agent can be requested.

    requested_main_agent_classes: tuple, default: ('car',)
        Tuple of main agent types, i.e. ('car', 'pedestrian').
        The string should be the same as dataset_metadata.ontology_table.

    requested_feature_types: tuple, default: None
        Tuple of feature types, e.g. ('agent_3d', 'ego_intention').

    requested_autolabels: tuple, default: None
        Autolabels forwarded to the underlying SynchronizedSceneDataset.

    forward_context: int, default: 0
        Forward context in frames [T+1, ..., T+forward]

    backward_context: int, default: 0
        Backward context in frames [T-backward, ..., T-1]

    min_main_agent_forward: int, default: 0
        Minimum forward samples for main agent. The main-agent is guaranteed to appear
        in [min_main_agent_forward, forward_context] samples in the forward direction.

    min_main_agent_backward: int, default: 0
        Minimum backward samples for main agent. The main-agent is guaranteed to appear
        in [min_main_agent_backward, backward_context] samples in the backward direction.

    generate_depth_from_datum: str, default: None
        Datum name of the point cloud. If not None, a depth map is generated for the
        camera using the desired point cloud.

    batch_per_agent: bool, default: False
        Include whole trajectory of an agent in each batch fetch (designed for inference).
        If True, backward_context = forward_context = 0 implicitly.

    use_diskcache: bool, default: True
        If True, cache ScenePb2 object using diskcache. If False, save the object in memory.
        NOTE: Setting use_diskcache to False may exhaust memory with a large number of scenes.
    """
    def __init__(
        self,
        scene_dataset_json,
        agents_dataset_json,
        split='train',
        datum_names=None,
        requested_agent_type='agent_3d',
        requested_main_agent_classes=('car', ),
        requested_feature_types=None,
        requested_autolabels=None,
        forward_context=0,
        backward_context=0,
        min_main_agent_forward=0,
        min_main_agent_backward=0,
        generate_depth_from_datum=None,
        batch_per_agent=False,
        use_diskcache=True
    ):
        # Validate the requested agent/feature types against the registries.
        if requested_agent_type is not None:
            assert requested_agent_type in AGENT_REGISTRY, 'Invalid agent type requested!'
            self.requested_agent_type = requested_agent_type
        else:
            self.requested_agent_type = ()
        if requested_feature_types is not None:
            assert all(
                requested_feature_type in ALL_FEATURE_TYPES for requested_feature_type in requested_feature_types
            ), 'Invalid feature type requested!'
            self.requested_feature_types = requested_feature_types
        else:
            self.requested_feature_types = ()

        self.batch_per_agent = batch_per_agent
        self.generate_depth_from_datum = generate_depth_from_datum

        # Normalize datum names (lower-cased, deduplicated, sorted).
        datum_names = sorted(set(datum_names)) if datum_names else set([])
        self.selected_datums = [_d.lower() for _d in datum_names]
        # Only build the (expensive) synchronized dataset when raw datums were requested.
        if len(self.selected_datums) != 0:
            self.synchronized_dataset = SynchronizedSceneDataset(
                scene_dataset_json,
                split=split,
                backward_context=backward_context,
                requested_autolabels=requested_autolabels,
                forward_context=forward_context,
                datum_names=self.selected_datums,
                use_diskcache=use_diskcache
            )

        assert min_main_agent_backward <= backward_context and \
            min_main_agent_forward <= forward_context, 'Provide valid minimum context for main agent.'
        if batch_per_agent:
            # Use whole trajectory of an agent instead of fixed-size context windows.
            backward_context = forward_context = 0
        self.forward_context = forward_context
        self.backward_context = backward_context
        self.min_main_agent_forward = min_main_agent_forward if min_main_agent_forward else forward_context
        # BUGFIX: the original tested `min_main_agent_forward` here (copy-paste error),
        # so a caller setting only min_main_agent_backward had it silently replaced by
        # backward_context. The condition must test the backward argument itself.
        self.min_main_agent_backward = min_main_agent_backward if min_main_agent_backward else backward_context

        # Load agent containers for the requested split, then their ontologies.
        agent_groups = AgentDataset._extract_agent_groups_from_agent_dataset_json(
            agents_dataset_json, requested_agent_type, split=split
        )
        agent_metadata = AgentMetadata.from_agent_containers(agent_groups, requested_agent_type, requested_feature_types)
        name_to_id = agent_metadata.ontology_table[AGENT_TYPE_TO_ANNOTATION_TYPE[requested_agent_type]].name_to_id
        # NOTE(review): class ids appear to be shifted by +1 relative to the ontology
        # (presumably reserving 0 for background) — confirm against AgentSnapshot.class_id.
        self.requested_main_agent_classes = tuple([name_to_id[atype] + 1 for atype in requested_main_agent_classes])

        # Load agent data for all groups in parallel.
        with Pool(cpu_count()) as proc:
            agent_groups = list(
                proc.map(
                    partial(
                        self._load_agents_data,
                        ontology_table=agent_metadata.ontology_table,
                        feature_ontology_table=agent_metadata.feature_ontology_table
                    ), agent_groups
                )
            )

        super().__init__(agent_metadata, agent_groups=agent_groups)

        # Optionally index by agent track rather than by frame.
        if batch_per_agent:
            self._build_agent_index()

    def _build_agent_index(self):
        """Build an index of agents grouped by instance id. This index is used to
        index the dataset by agent track.

        Returns
        -------
        agent_item_index: list
            List of dataset items that contain index into
            (main_agent_id, scene_idx, sample_idx_in_scene_start, sample_idx_in_scene_end, [datum_name ...]).
        """
        self.dataset_agent_index = defaultdict(list)
        logging.info('Building agent index, this will take a while.')
        for index in range(len(self.dataset_item_index)):
            scene_idx, sample_idx_in_scene, main_agent_id, datum_names = self.dataset_item_index[index]
            main_agent_id_and_scene_idx = f'{str(scene_idx)}_{str(main_agent_id)}'
            # Track the [min, max] sample range observed for each (scene, agent) pair.
            if main_agent_id_and_scene_idx not in self.dataset_agent_index:
                self.dataset_agent_index[main_agent_id_and_scene_idx] = [-1, -1, float('inf'), -1, []]
            self.dataset_agent_index[main_agent_id_and_scene_idx] = [
                main_agent_id, scene_idx,
                min(sample_idx_in_scene, self.dataset_agent_index[main_agent_id_and_scene_idx][2]),
                max(sample_idx_in_scene, self.dataset_agent_index[main_agent_id_and_scene_idx][3]), datum_names
            ]
        self.dataset_agent_index = [v for (k, v) in self.dataset_agent_index.items()]

    def _build_item_index(self):
        """Builds an index of dataset items that refer to the scene index, agent index,
        sample index and datum_within_scene index. This refers to a particular dataset
        split. __getitem__ indexes into this look-up table.

        Synchronizes at the sample-level and only adds sample indices if context frames
        are available, i.e. sample indices in the (backward_context, N - forward_context)
        range.

        Returns
        -------
        item_index: list
            List of dataset items that contain index into
            (scene_idx, sample_idx_in_scene, (main_agent_idx, main_agent_id), [datum_name ...]).
        """
        logging.info(f'Building index for {self.__class__.__name__}, this will take a while.')
        st = time.time()
        # Index scenes in parallel; each worker returns a per-scene item list.
        with Pool(cpu_count()) as proc:
            item_index = proc.starmap(
                self._item_index_for_scene, [(scene_idx, ) for scene_idx in range(len(self.agent_groups))]
            )
        logging.info(f'Index built in {time.time() - st:.2f}s.')
        assert len(item_index) > 0, 'Failed to index items in the dataset.'
        # Flatten and drop any missing entries; all items must have identical arity.
        item_index = list(itertools.chain.from_iterable(item_index))
        item_index = [item for item in item_index if item is not None]
        item_lengths = [len(item_tup) for item_tup in item_index]
        assert all([l == item_lengths[0] for l in item_lengths]
                   ), 'All sample items are not of the same length, datum names might be missing.'
        return item_index

    def _item_index_for_scene(self, scene_idx):
        """Build the item index for a single scene, splitting each main-agent track
        into maximal runs of consecutive samples and windowing them by the
        requested context."""
        agent_group = self.agent_groups[scene_idx]
        num_samples = len(agent_group.sample_id_to_agent_snapshots)
        instance_id_to_trajectory = defaultdict(list)
        instance_id_to_segment_idx = defaultdict(int)
        for sample_id, agents_slice in agent_group.sample_id_to_agent_snapshots.items():
            for agent_snapshot in agents_slice:
                # Only consider the requested main-agent classes.
                if agent_snapshot.class_id not in self.requested_main_agent_classes:
                    continue
                instance_index_prefix = f'{str(scene_idx)}_{str(agent_snapshot.instance_id)}'
                segment_idx_start = instance_id_to_segment_idx[instance_index_prefix] \
                    if instance_index_prefix in instance_id_to_segment_idx else 0
                # Append to the current contiguous segment, or start a new one when
                # the sample ids are not consecutive (agent left and re-entered view).
                for segment_idx in range(segment_idx_start, num_samples):
                    instance_index_id = f'{instance_index_prefix}_{segment_idx}'
                    if instance_index_id in instance_id_to_trajectory and \
                            instance_id_to_trajectory[instance_index_id][-1][1] + 1 != sample_id:
                        continue
                    instance_id_to_trajectory[instance_index_id].append(
                        (scene_idx, sample_id, agent_snapshot.instance_id, self.selected_datums)
                    )
                    instance_id_to_segment_idx[instance_index_prefix] = segment_idx
                    break
        # Keep only trajectories long enough to satisfy the minimum context, then trim
        # each end so every emitted item has its required backward/forward context.
        item_index = []
        trajectory_min_length = self.min_main_agent_backward + self.min_main_agent_forward + 1
        for id_ in instance_id_to_trajectory:
            trajectory_length = len(instance_id_to_trajectory[id_])
            if trajectory_length >= trajectory_min_length:
                first_sample_idx = instance_id_to_trajectory[id_][0][1]
                final_sample_idx = instance_id_to_trajectory[id_][-1][1]
                beg = self.min_main_agent_backward \
                    if self.min_main_agent_backward + first_sample_idx > self.backward_context else self.backward_context
                end = trajectory_length - (
                    self.min_main_agent_forward
                    if self.min_main_agent_forward + final_sample_idx < num_samples else self.forward_context
                )
                if end > beg:
                    item_index.append(instance_id_to_trajectory[id_][beg:end])
        return list(itertools.chain.from_iterable(item_index))

    def __len__(self):
        """Return the length of the dataset (tracks if batch_per_agent, frames otherwise)."""
        return len(self.dataset_agent_index) if self.batch_per_agent else len(self.dataset_item_index)

    def __getitem__(self, index):
        """Get the dataset item at index.

        Parameters
        ----------
        index: int
            Index of item to get.

        Returns
        -------
        context_window: list of OrderedDict
            Outer list corresponds to temporal ordering of samples. Each element is an
            OrderedDict with keys:
                datums: list of datum_data at (scene_idx, sample_idx_in_scene, datum_name);
                agents: AgentSnapshotList in a frame;
                main_agent_id: instance id of the main agent;
                main_agent_idx: index of the main agent in the AgentSlice (None if absent).
        """
        assert self.dataset_item_index is not None, ('Index is not built, select datums before getting elements.')

        if self.batch_per_agent:
            # Whole-trajectory fetch: the window spans the agent's full sample range.
            main_agent_id, scene_idx, sample_idx_in_scene_start, sample_idx_in_scene_end, datum_names = \
                self.dataset_agent_index[index]
        else:
            # Fixed context window around the anchor sample.
            scene_idx, sample_idx_in_scene, main_agent_id, datum_names = self.dataset_item_index[index]
            sample_idx_in_scene_start = sample_idx_in_scene - self.backward_context
            sample_idx_in_scene_end = sample_idx_in_scene + self.forward_context

        context_window = []
        for qsample_idx_in_scene in range(sample_idx_in_scene_start, sample_idx_in_scene_end + 1):
            datums = []
            if len(datum_names) > 0:
                for datum_name in datum_names:
                    datum_data = self.synchronized_dataset.get_datum_data(scene_idx, qsample_idx_in_scene, datum_name)
                    datums.append(datum_data)
            agents_in_sample = self.agent_groups[scene_idx].agent_slice(qsample_idx_in_scene)
            # Locate the main agent in this frame's slice (may be absent in context frames).
            instance_matched = [agent.instance_id == main_agent_id for agent in agents_in_sample]
            main_agent_idx_in_agents_slice = instance_matched.index(True) if any(instance_matched) else None
            synchronized_sample = OrderedDict({
                'datums': datums,
                'agents': agents_in_sample,
                'main_agent_id': main_agent_id,
                'main_agent_idx': main_agent_idx_in_agents_slice
            })
            context_window.append(synchronized_sample)
        return context_window
class AgentDatasetLite(BaseAgentDataset):
    """Dataset for agent-centric prediction or planning use cases. It provides two
    modes of accessing agents: by track and by frame. If 'batch_per_agent' is set
    True, the data iterates per track (note: track length may vary). Otherwise the
    data iterates per frame and each sample contains all agents in the frame.

    Parameters
    ----------
    scene_dataset_json: str
        Full path to the scene dataset json holding collections of paths to scene json.

    agents_dataset_json: str
        Full path to the agent dataset json holding collections of paths to scene json.

    split: str, default: 'train'
        Split of dataset to read ("train" | "val" | "test" | "train_overfit").

    datum_names: list, default: None
        Select list of datum names for synchronization (see self.select_datums(datum_names)).

    requested_agent_type: str, default: 'agent_3d'
        Agent type, i.e. 'agent_2d' or 'agent_3d'. Only one type of agent can be requested.

    requested_main_agent_classes: tuple, default: ('car',)
        Tuple of main agent types, i.e. ('car', 'pedestrian').
        The string should be the same as dataset_metadata.ontology_table.

    forward_context: int, default: 0
        Forward context in frames [T+1, ..., T+forward]

    backward_context: int, default: 0
        Backward context in frames [T-backward, ..., T-1]

    batch_per_agent: bool, default: False
        Include whole trajectory of an agent in each batch fetch.
        If True, backward_context = forward_context = 0 implicitly.

    use_diskcache: bool, default: True
        If True, cache ScenePb2 object using diskcache. If False, save the object in memory.
        NOTE: Setting use_diskcache to False may exhaust memory with a large number of scenes.
    """
    def __init__(
        self,
        scene_dataset_json,
        agents_dataset_json,
        split='train',
        datum_names=None,
        requested_agent_type='agent_3d',
        requested_main_agent_classes=('car', ),
        requested_feature_types=None,
        requested_autolabels=None,
        forward_context=0,
        backward_context=0,
        batch_per_agent=False,
        use_diskcache=True
    ):
        # Validate requested agent/feature types against the registries.
        if requested_agent_type is not None:
            assert requested_agent_type in AGENT_REGISTRY, 'Invalid agent type requested!'
            self.requested_agent_type = requested_agent_type
        else:
            self.requested_agent_type = ()
        if requested_feature_types is not None:
            assert all(
                requested_feature_type in ALL_FEATURE_TYPES for requested_feature_type in requested_feature_types
            ), 'Invalid feature type requested!'
            self.requested_feature_types = requested_feature_types
        else:
            self.requested_feature_types = ()

        self.batch_per_agent = batch_per_agent
        # Normalize datum names (lower-cased, deduplicated, sorted).
        datum_names = sorted(set(datum_names)) if datum_names else set([])
        self.selected_datums = [_d.lower() for _d in datum_names]
        # Only build the (expensive) synchronized dataset when raw datums were requested.
        if len(self.selected_datums) != 0:
            self.synchronized_dataset = SynchronizedSceneDataset(
                scene_dataset_json,
                split=split,
                backward_context=backward_context,
                requested_autolabels=requested_autolabels,
                forward_context=forward_context,
                datum_names=self.selected_datums,
                use_diskcache=use_diskcache
            )

        if batch_per_agent:
            # Whole-trajectory mode: no fixed context window.
            backward_context = forward_context = 0
        self.forward_context = forward_context
        self.backward_context = backward_context

        # Load agent containers and build the ontology tables.
        agent_groups = AgentDataset._extract_agent_groups_from_agent_dataset_json(
            agents_dataset_json, requested_agent_type, split=split
        )
        agent_metadata = AgentMetadata.from_agent_containers(agent_groups, requested_agent_type, requested_feature_types)
        name_to_id = agent_metadata.ontology_table[AGENT_TYPE_TO_ANNOTATION_TYPE[requested_agent_type]].name_to_id
        # NOTE(review): class ids appear shifted by +1 relative to the ontology — confirm.
        self.requested_main_agent_classes = tuple([name_to_id[atype] + 1 for atype in requested_main_agent_classes])

        # Load agent data for all groups in parallel.
        with Pool(cpu_count()) as proc:
            agent_groups = list(
                proc.map(
                    partial(
                        self._load_agents_data,
                        ontology_table=agent_metadata.ontology_table,
                        feature_ontology_table=agent_metadata.feature_ontology_table
                    ), agent_groups
                )
            )

        super().__init__(agent_metadata, agent_groups=agent_groups)

    def _build_item_index(self):
        """Builds an index of dataset items that refer to the scene index, agent index,
        sample index and datum_within_scene index. This refers to a particular dataset
        split. __getitem__ indexes into this look-up table.

        Synchronizes at the sample-level and only adds sample indices if context frames
        are available, i.e. sample indices in the (backward_context, N - forward_context)
        range.

        Returns
        -------
        item_index: list
            List of dataset items that contain index into
            (scene_idx, sample_idx_in_scene, (main_agent_idx, main_agent_id), [datum_name ...]).
        """
        logging.info(f'Building index for {self.__class__.__name__}, this will take a while.')
        st = time.time()
        # Two indexing strategies: per-track (batch_per_agent) or per-frame.
        if self.batch_per_agent:
            with Pool(cpu_count()) as proc:
                item_index = proc.starmap(
                    partial(self._agent_index_for_scene, selected_datums=self.selected_datums),
                    [(scene_idx, agent_group) for (scene_idx, agent_group) in enumerate(self.agent_groups)]
                )
        else:
            with Pool(cpu_count()) as proc:
                item_index = proc.starmap(
                    partial(
                        self._item_index_for_scene,
                        backward_context=self.backward_context,
                        forward_context=self.forward_context,
                        selected_datums=self.selected_datums
                    ), [(scene_idx, agent_group) for (scene_idx, agent_group) in enumerate(self.agent_groups)]
                )
        logging.info(f'Index built in {time.time() - st:.2f}s.')
        assert len(item_index) > 0, 'Failed to index items in the dataset.'
        # Flatten, drop missing entries, and enforce uniform item arity.
        item_index = list(itertools.chain.from_iterable(item_index))
        item_index = [item for item in item_index if item is not None]
        item_lengths = [len(item_tup) for item_tup in item_index]
        assert all([l == item_lengths[0] for l in item_lengths]
                   ), 'All sample items are not of the same length, datum names might be missing.'
        return item_index

    @staticmethod
    def _item_index_for_scene(scene_idx, agent_group, backward_context, forward_context, selected_datums):
        """Per-frame index: one item per sample index with full context available."""
        sample_range = np.arange(backward_context, len(agent_group.sample_id_to_agent_snapshots) - forward_context)
        scene_item_index = [(scene_idx, sample_idx, selected_datums) for sample_idx in sample_range]
        return scene_item_index

    @staticmethod
    def _agent_index_for_scene(scene_idx, agent_group, selected_datums):
        """Per-track index: one item per agent instance id in the scene."""
        scene_item_index = [
            (scene_idx, instance_id, selected_datums) for instance_id in agent_group.instance_id_to_agent_snapshots
        ]
        return scene_item_index

    def __len__(self):
        """Return the length of the dataset."""
        return len(self.dataset_item_index)

    def __getitem__(self, index):
        """Get the dataset item at index.

        Parameters
        ----------
        index: int
            Index of item to get.

        Returns
        -------
        context_window: list of OrderedDict
            Outer list corresponds to temporal ordering of samples. Each element is an
            OrderedDict with keys:
                datums: list of datum_data at (scene_idx, sample_idx_in_scene, datum_name);
                agents: AgentSnapshotList (single-snapshot per step in track mode,
                        all agents in the frame otherwise).
        """
        assert self.dataset_item_index is not None, ('Index is not built, select datums before getting elements.')
        context_window = []
        if self.batch_per_agent:
            # Track mode: iterate the agent's snapshots; one window entry per snapshot.
            scene_idx, instance_id, datum_names = self.dataset_item_index[index]
            track = self.agent_groups[scene_idx].agent_track(instance_id)
            ontology = track.ontology
            # Re-wrap each snapshot in the track's own container type.
            type_of_track = type(track)
            for agent_snapshot in track:
                qsample_idx_in_scene = agent_snapshot.sample_idx
                datums = []
                if len(datum_names) > 0:
                    for datum_name in datum_names:
                        datum_data = self.synchronized_dataset.get_datum_data(
                            scene_idx, qsample_idx_in_scene, datum_name
                        )
                        datums.append(datum_data)
                synchronized_sample = OrderedDict({
                    'datums': datums,
                    'agents': type_of_track(ontology, [agent_snapshot]),
                })
                context_window.append(synchronized_sample)
        else:
            # Frame mode: fixed context window around the anchor sample.
            scene_idx, sample_idx_in_scene, datum_names = self.dataset_item_index[index]
            sample_idx_in_scene_start = sample_idx_in_scene - self.backward_context
            sample_idx_in_scene_end = sample_idx_in_scene + self.forward_context
            for qsample_idx_in_scene in range(sample_idx_in_scene_start, sample_idx_in_scene_end + 1):
                datums = []
                if len(datum_names) > 0:
                    for datum_name in datum_names:
                        datum_data = self.synchronized_dataset.get_datum_data(
                            scene_idx, qsample_idx_in_scene, datum_name
                        )
                        datums.append(datum_data)
                agents_in_sample = self.agent_groups[scene_idx].agent_slice(qsample_idx_in_scene)
                synchronized_sample = OrderedDict({
                    'datums': datums,
                    'agents': agents_in_sample,
                })
                context_window.append(synchronized_sample)
        return context_window
class AgentMetadata():
    """A wrapper agents-metadata class to support two entrypoints for agents datasets
    (reading from agents.json).

    Parameters
    ----------
    agent_groups: list[AgentContainer]
        List of AgentContainer objects to be included in the dataset.

    directory: str
        Directory of agent_dataset.

    feature_ontology_table: dict, default: None
        A dictionary mapping feature type key(s) to Ontology(s), i.e.:
        {
            "agent_2d": AgentFeatureOntology[<ontology_sha>],
            "agent_3d": AgentFeatureOntology[<ontology_sha>]
        }
    ontology_table: dict, default: None
        A dictionary mapping annotation key(s) to Ontology(s), i.e.:
        {
            "bounding_box_2d": BoundingBoxOntology[<ontology_sha>],
            "autolabel_model_1/bounding_box_2d": BoundingBoxOntology[<ontology_sha>],
            "semantic_segmentation_2d": SemanticSegmentationOntology[<ontology_sha>]
        }
    """
    def __init__(self, agent_groups, directory, feature_ontology_table=None, ontology_table=None):
        assert directory is not None, 'Dataset directory is required, and cannot be None.'
        self.agent_groups = agent_groups
        self.directory = directory
        self.feature_ontology_table = feature_ontology_table
        self.ontology_table = ontology_table

    @classmethod
    def from_agent_containers(cls, agent_containers, requested_agent_types=None, requested_feature_types=None):
        """Load AgentMetadata from agent containers.

        Parameters
        ----------
        agent_containers: list of AgentContainer
            List of AgentContainer objects.

        requested_agent_types: List(str)
            List of agent types, such as ['agent_3d', 'agent_2d']

        requested_feature_types: List(str)
            List of feature types, such as ['parked_car', 'ego_intention']

        Raises
        ------
        Exception
            Raised if an ontology from an agent container is not in our ontology registry.
        """
        assert len(agent_containers), 'SceneContainers is empty.'
        requested_agent_types = [] if requested_agent_types is None else requested_agent_types
        # NOTE(review): if EITHER request list is empty, both ontology tables are left
        # empty — so requesting agents without features yields no agent ontology either.
        # Looks intentional (both are needed downstream) but confirm against callers.
        if not requested_agent_types or not requested_feature_types:
            return cls(
                agent_containers,
                directory=os.path.dirname(agent_containers[0].directory),
                feature_ontology_table={},
                ontology_table={}
            )

        feature_ontology_table = {}
        dataset_ontology_table = {}
        logging.info('Building ontology table.')
        st = time.time()

        # Deduplicate containers by feature-ontology filename: containers sharing the
        # same ontology file basename collapse to a single representative.
        # NOTE(review): keyed on file basename, not scene identity — presumably ontology
        # files are content-addressed (sha-named); verify.
        unique_scenes = {
            os.path.basename(f): agent_container
            for agent_container in agent_containers
            for (_, _, filenames) in os.walk(os.path.join(agent_container.directory, FEATURE_ONTOLOGY_FOLDER))
            for f in filenames
        }
        for _, agent_container in unique_scenes.items():
            # Merge feature ontologies, enforcing consistency across containers.
            for feature_ontology_key, ontology_file in agent_container.feature_ontology_files.items():
                if feature_ontology_key in FEATURE_ONTOLOGY_REGISTRY:
                    # Skip feature ontologies that were not requested.
                    if feature_ontology_key not in requested_feature_types:
                        continue
                    feature_ontology_spec = FEATURE_ONTOLOGY_REGISTRY[feature_ontology_key]
                    # A None spec marks a feature type without a loadable ontology.
                    if feature_ontology_spec is None:
                        continue
                    if feature_ontology_key not in feature_ontology_table:
                        feature_ontology_table[feature_ontology_key] = feature_ontology_spec.load(ontology_file)
                    else:
                        # All containers must agree on the ontology for a given key.
                        assert feature_ontology_table[feature_ontology_key] == feature_ontology_spec.load(
                            ontology_file
                        ), 'Inconsistent ontology for key {}.'.format(feature_ontology_key)
                else:
                    raise Exception(f'Ontology for key {feature_ontology_key} not found in registry!')

            # Merge annotation ontologies for the requested agent types.
            for ontology_key, ontology_file in agent_container.ontology_files.items():
                if ontology_key in ONTOLOGY_REGISTRY:
                    # Skip annotation ontologies whose agent type was not requested.
                    if ANNOTATION_TYPE_TO_AGENT_TYPE[ontology_key] not in requested_agent_types:
                        continue
                    ontology_spec = ONTOLOGY_REGISTRY[ontology_key]
                    if ontology_spec is None:
                        continue
                    if ontology_key not in dataset_ontology_table:
                        dataset_ontology_table[ontology_key] = ontology_spec.load(ontology_file)
                    else:
                        assert dataset_ontology_table[ontology_key] == ontology_spec.load(
                            ontology_file
                        ), 'Inconsistent ontology for key {}.'.format(ontology_key)
                else:
                    raise Exception(f'Ontology for key {ontology_key} not found in registry!')
        logging.info(f'Ontology table built in {time.time() - st:.2f}s.')
        return cls(
            agent_containers,
            directory=os.path.dirname(agent_containers[0].directory),
            feature_ontology_table=feature_ontology_table,
            ontology_table=dataset_ontology_table
        )

    @staticmethod
    def get_dataset_splits(agents_json):
        """Get a list of splits in the agent_json.json.

        Parameters
        ----------
        agents_json: str
            Full path to the agents json holding agent metadata, agent splits.

        Returns
        -------
        agents_splits: list of str
            List of agents splits (train | val | test | train_overfit).
        """
        assert agents_json.endswith('.json'), 'Please provide a dataset.json file.'
        agents = open_pbobject(agents_json, AgentsPb2)
        # Map the numeric split enum values back to their lowercase names.
        return [
            dataset_pb2.DatasetSplit.DESCRIPTOR.values_by_number[split_index].name.lower()
            for split_index in agents.agents_splits
        ]
class _ParallelDomainDataset(_SynchronizedDataset):
    """Dataset for PD data. Works just like normal SynchronizedSceneDataset,
    with special keyword datum name "lidar". When this datum is requested,
    all lidars are coalesced into a single "lidar" datum.

    Parameters
    ----------
    dataset_metadata: DatasetMetadata
        Dataset metadata, populated from scene dataset JSON

    scenes: list[SceneContainer], default: None
        List of SceneContainers parsed from scene dataset JSON

    datum_names: list, default: None
        Select list of datum names for synchronization (see self.select_datums(datum_names)).

    requested_annotations: tuple, default: None
        Tuple of annotation types, i.e. ('bounding_box_2d', 'bounding_box_3d'). Should be equivalent
        to directory containing annotation from dataset root.

    requested_autolabels: tuple[str], default: None
        Tuple of annotation types similar to `requested_annotations`, but associated with a
        particular autolabeling model. Expected format is "<model_id>/<annotation_type>"

    forward_context: int, default: 0
        Forward context in frames [T+1, ..., T+forward]

    backward_context: int, default: 0
        Backward context in frames [T-backward, ..., T-1]

    generate_depth_from_datum: str, default: None
        Datum name of the point cloud. If not None, the depth map will be generated for the
        camera using the desired point cloud.

    only_annotated_datums: bool, default: False
        If True, only datums with annotations matching the requested annotation types are returned.

    use_virtual_camera_datums: bool, default: True
        If True, uses virtual camera datums. See dgp.datasets.pd_dataset.VIRTUAL_CAMERA_DATUM_NAMES
        for more details.

    accumulation_context: dict, default None
        Dictionary of datum names containing a tuple of (backward_context, forward_context) for
        sensor accumulation. For example, accumulation_context={'lidar': (3, 1)} accumulates lidar
        points over the past three time steps and one forward step. Only valid for lidar and
        radar datums.

    transform_accumulated_box_points: bool, default: False
        Flag to use cuboid pose and instance id to warp points when using lidar accumulation.

    autolabel_root: str, default: None
        Path to autolabels.
    """
    def __init__(
        self,
        dataset_metadata,
        scenes=None,
        datum_names=None,
        requested_annotations=None,
        requested_autolabels=None,
        forward_context=0,
        backward_context=0,
        generate_depth_from_datum=None,
        only_annotated_datums=False,
        use_virtual_camera_datums=True,
        accumulation_context=None,
        transform_accumulated_box_points=False,
        autolabel_root=None
    ):
        # Coalescing is requested when the special combined-lidar datum name is present.
        self.coalesce_point_cloud = (datum_names is not None) and (COALESCED_LIDAR_DATUM_NAME in datum_names)
        self.use_virtual_camera_datums = use_virtual_camera_datums
        if self.coalesce_point_cloud:
            # Remember the caller's datum ordering so coalesced samples can be
            # re-aligned to it in coalesce_sample().
            self._datum_name_to_index = {datum_name: datum_idx for (datum_idx, datum_name) in enumerate(datum_names)}
            # Replace the virtual "lidar" datum with the individual lidar datums.
            new_datum_names = [datum_name for datum_name in datum_names if COALESCED_LIDAR_DATUM_NAME != datum_name]
            new_datum_names.extend(LIDAR_DATUM_NAMES)
            if use_virtual_camera_datums:
                new_datum_names.extend(VIRTUAL_CAMERA_DATUM_NAMES)
            # Fan out an accumulation context keyed on the coalesced name to each lidar.
            # NOTE(review): `accumulation_context.pop(...)` mutates the caller's dict.
            if (accumulation_context is not None) and (COALESCED_LIDAR_DATUM_NAME in accumulation_context):
                acc_context = accumulation_context.pop(COALESCED_LIDAR_DATUM_NAME)
                updated_acc = {datum_name: acc_context for datum_name in LIDAR_DATUM_NAMES}
                accumulation_context.update(updated_acc)
            logging.info('Datum names with lidar datums={}'.format(new_datum_names))
            datum_names = new_datum_names
        super().__init__(
            dataset_metadata=dataset_metadata,
            scenes=scenes,
            datum_names=datum_names,
            requested_annotations=requested_annotations,
            requested_autolabels=requested_autolabels,
            forward_context=forward_context,
            backward_context=backward_context,
            generate_depth_from_datum=generate_depth_from_datum,
            only_annotated_datums=only_annotated_datums,
            accumulation_context=accumulation_context,
            transform_accumulated_box_points=transform_accumulated_box_points,
            autolabel_root=autolabel_root
        )

    def coalesce_pc_data(self, items):
        """Combine a set of point cloud datums into a single point cloud.

        All point clouds (and their 3D boxes) are transformed into the shared
        vehicle frame via each datum's extrinsics before being stacked.

        Parameters
        ----------
        items: list
            List of OrderedDict, containing parsed point cloud or image data.

        Returns
        -------
        coalesced_pc: OrderedDict
            OrderedDict containing coalesced point cloud and associated metadata.
        """
        pc_items = [item for item in items if POINT_CLOUD_KEY in item]
        assert self.coalesce_point_cloud
        assert len(pc_items) == len(LIDAR_DATUM_NAMES)
        if len(self.requested_autolabels) > 0:
            logging.warning(
                'autolabels were requested, however point cloud coalesce does not support coalescing autolabels'
            )

        # Fuse all point clouds and boxes into the vehicle frame.
        coalesced_pc = OrderedDict()
        X_V_merged, bbox_3d_V_merged, instance_ids_merged = [], [], []
        total_bounding_box_3d = 0
        for item in pc_items:
            X_S = item[POINT_CLOUD_KEY]
            # p_VS: sensor -> vehicle transform for this datum.
            p_VS = item['extrinsics']
            X_V_merged.append(p_VS * X_S)
            if 'bounding_box_3d' in item:
                total_bounding_box_3d += len(item['bounding_box_3d'])
                # Deduplicate boxes seen by multiple lidars via instance id.
                for bbox_3d in item['bounding_box_3d']:
                    if bbox_3d.instance_id not in instance_ids_merged:
                        instance_ids_merged.append(bbox_3d.instance_id)
                        bbox_3d_V_merged.append(p_VS * bbox_3d)
        coalesced_pc['datum_name'] = COALESCED_LIDAR_DATUM_NAME
        # Timestamp/pose are taken from the first lidar datum as representative.
        coalesced_pc['timestamp'] = pc_items[0]['timestamp']
        coalesced_pc[POINT_CLOUD_KEY] = np.vstack(X_V_merged)
        coalesced_pc['extra_channels'] = np.vstack([item['extra_channels'] for item in pc_items])
        # Points are already in the vehicle frame, so extrinsics are identity.
        coalesced_pc['extrinsics'] = Pose()
        p_LS = pc_items[0]['pose']
        p_VS = pc_items[0]['extrinsics']
        # p_LV = p_LS * p_SV: vehicle pose in the local/world frame.
        p_LV = p_LS * p_VS.inverse()
        coalesced_pc['pose'] = p_LV
        if len(bbox_3d_V_merged):
            ontology = pc_items[0]['bounding_box_3d'].ontology
            coalesced_pc['bounding_box_3d'] = BoundingBox3DAnnotationList(ontology, bbox_3d_V_merged)
        # Deduplication can only shrink (or keep) the total box count.
        if 'bounding_box_3d' in coalesced_pc.keys():
            assert len(coalesced_pc['bounding_box_3d']) <= total_bounding_box_3d
        return coalesced_pc

    def coalesce_sample(self, sample):
        """Coalesce a point cloud for a single sample, restoring the caller's
        original datum ordering.

        Parameters
        ----------
        sample: list
            List of OrderedDict, containing parsed point cloud or image data.
        """
        # Maps requested-datum index -> datum dict, so output order matches the request.
        items_dict = OrderedDict()
        items_dict[self._datum_name_to_index[COALESCED_LIDAR_DATUM_NAME]] = self.coalesce_pc_data(sample)
        # Non-lidar, non-virtual datums keep their originally-requested slots.
        items_dict.update({
            self._datum_name_to_index[item['datum_name']]: item
            for item in sample
            if (POINT_CLOUD_KEY not in item) and (item['datum_name'] not in VIRTUAL_CAMERA_DATUM_NAMES)
        })
        if self.use_virtual_camera_datums:
            # Virtual camera datums were appended by __init__, so slot them after the rest.
            virtual_camera_datums = [item for item in sample if item['datum_name'] in VIRTUAL_CAMERA_DATUM_NAMES]
            virtual_camera_datums = {(idx + len(items_dict)): item for (idx, item) in enumerate(virtual_camera_datums)}
            items_dict.update(virtual_camera_datums)
        indices_and_items_sorted = sorted(list(items_dict.items()), key=lambda tup: tup[0])
        aligned_items = list(map(lambda tup: tup[1], indices_and_items_sorted))
        return aligned_items

    def __getitem__(self, idx):
        """Fetch a (context window of) sample(s), coalescing lidars when requested."""
        sample_data = super().__getitem__(idx)
        if self.coalesce_point_cloud:
            if (self.forward_context > 0) or (self.backward_context > 0):
                # Coalesce every timestep in the context window.
                sample_data = [self.coalesce_sample(t_item) for t_item in sample_data]
            else:
                sample_data = [self.coalesce_sample(sample_data[0])]
        return sample_data
class ParallelDomainSceneDataset(_ParallelDomainDataset):
    """Multi-scene Parallel Domain dataset built from a scene-dataset JSON.

    Thin constructor wrapper: extracts the scene containers for the requested
    split, builds the dataset metadata, and delegates everything else to
    ``_ParallelDomainDataset``. Refer to SynchronizedSceneDataset for parameters.
    """
    def __init__(
        self,
        scene_dataset_json,
        split='train',
        datum_names=None,
        requested_annotations=None,
        requested_autolabels=None,
        backward_context=0,
        forward_context=0,
        generate_depth_from_datum=None,
        only_annotated_datums=False,
        use_virtual_camera_datums=True,
        skip_missing_data=False,
        accumulation_context=None,
        dataset_root=None,
        transform_accumulated_box_points=False,
        use_diskcache=True,
        autolabel_root=None
    ):
        # Surface the memory trade-off of disabling the disk cache up front.
        if not use_diskcache:
            logging.warning('Instantiating a dataset with use_diskcache=False may exhaust memory with a large dataset.')

        # Parse the scene containers for the requested split from the dataset JSON.
        scene_containers = BaseDataset._extract_scenes_from_scene_dataset_json(
            scene_dataset_json,
            split,
            requested_autolabels,
            is_datums_synchronized=True,
            skip_missing_data=skip_missing_data,
            dataset_root=dataset_root,
            use_diskcache=use_diskcache,
            autolabel_root=autolabel_root
        )
        # Aggregate per-scene ontologies into dataset-level metadata.
        metadata = DatasetMetadata.from_scene_containers(
            scene_containers, requested_annotations, requested_autolabels, autolabel_root=autolabel_root
        )
        super().__init__(
            metadata,
            scenes=scene_containers,
            datum_names=datum_names,
            requested_annotations=requested_annotations,
            requested_autolabels=requested_autolabels,
            backward_context=backward_context,
            forward_context=forward_context,
            generate_depth_from_datum=generate_depth_from_datum,
            only_annotated_datums=only_annotated_datums,
            use_virtual_camera_datums=use_virtual_camera_datums,
            accumulation_context=accumulation_context,
            transform_accumulated_box_points=transform_accumulated_box_points,
            autolabel_root=autolabel_root
        )
class ParallelDomainScene(_ParallelDomainDataset):
    """Single-scene Parallel Domain dataset built from one scene JSON.

    Refer to SynchronizedScene for parameters.
    """
    def __init__(
        self,
        scene_json,
        datum_names=None,
        requested_annotations=None,
        requested_autolabels=None,
        backward_context=0,
        forward_context=0,
        generate_depth_from_datum=None,
        only_annotated_datums=False,
        use_virtual_camera_datums=True,
        skip_missing_data=False,
        accumulation_context=None,
        transform_accumulated_box_points=False,
        use_diskcache=True,
        autolabel_root=None
    ):
        # Without diskcache every ScenePb2 object stays resident in memory.
        if (not use_diskcache):
            logging.warning('Instantiating a dataset with use_diskcache=False may exhaust memory with a large dataset.')
        scene = BaseDataset._extract_scene_from_scene_json(
            scene_json,
            requested_autolabels,
            is_datums_synchronized=True,
            skip_missing_data=skip_missing_data,
            use_diskcache=use_diskcache,
            autolabel_root=autolabel_root
        )
        dataset_metadata = DatasetMetadata.from_scene_containers(
            [scene], requested_annotations, requested_autolabels, autolabel_root=autolabel_root
        )
        super().__init__(
            dataset_metadata,
            scenes=[scene],
            datum_names=datum_names,
            requested_annotations=requested_annotations,
            requested_autolabels=requested_autolabels,
            backward_context=backward_context,
            forward_context=forward_context,
            generate_depth_from_datum=generate_depth_from_datum,
            only_annotated_datums=only_annotated_datums,
            use_virtual_camera_datums=use_virtual_camera_datums,
            accumulation_context=accumulation_context,
            transform_accumulated_box_points=transform_accumulated_box_points,
            # Fix: autolabel_root was accepted but never forwarded to the base
            # class, unlike ParallelDomainSceneDataset and SynchronizedScene.
            autolabel_root=autolabel_root
        )
class _SynchronizedDataset(BaseDataset):
    """Multi-modal dataset with sample-level synchronization.
    See BaseDataset for input parameters for the parent class.

    Parameters
    ----------
    dataset_metadata: DatasetMetadata
        Dataset metadata, populated from scene dataset JSON.

    scenes: list[SceneContainer], default: None
        List of SceneContainers parsed from scene dataset JSON.

    datum_names: list, default: None
        Select list of datum names for synchronization (see self.select_datums(datum_names)).

    requested_annotations: tuple, default: None
        Tuple of annotation types, i.e. ('bounding_box_2d', 'bounding_box_3d'). Should be equivalent
        to directory containing annotation from dataset root.

    requested_autolabels: tuple[str], default: None
        Tuple of annotation types similar to `requested_annotations`, but associated with a
        particular autolabeling model. Expected format is "<model_id>/<annotation_type>".

    backward_context: int, default: 0
        Backward context in frames [T-backward, ..., T-1].

    forward_context: int, default: 0
        Forward context in frames [T+1, ..., T+forward].

    accumulation_context: dict, default: None
        Dictionary of datum names containing a tuple of (backward_context, forward_context) for
        sensor accumulation. For example, accumulation_context={'lidar': (3, 1)} accumulates lidar
        points over the past three time steps and one forward step. Only valid for lidar and radar
        datums.

    generate_depth_from_datum: str, default: None
        Datum name of the point cloud. If not None, then the depth map will be generated for the
        camera using the desired point cloud.

    only_annotated_datums: bool, default: False
        If True, only datums with annotations matching the requested annotation types are returned.

    transform_accumulated_box_points: bool, default: False
        Flag to use cuboid pose and instance id to warp points when using lidar accumulation.

    autolabel_root: str, default: None
        Path to autolabels.

    ignore_raw_datum: Optional[list[str]], default: None
        Optionally pass a list of datum types to skip loading their raw data (but still load their
        annotations). For example, ignore_raw_datum=['image'] will skip loading the image rgb data;
        the rgb key will be set to None. Useful when only annotations or extrinsics are needed.
        Allowed values are any combination of 'image', 'point_cloud', 'radar_point_cloud'.
    """
    def __init__(
        self,
        dataset_metadata,
        scenes=None,
        datum_names=None,
        requested_annotations=None,
        requested_autolabels=None,
        forward_context=0,
        backward_context=0,
        accumulation_context=None,
        generate_depth_from_datum=None,
        only_annotated_datums=False,
        transform_accumulated_box_points=False,
        autolabel_root=None,
        ignore_raw_datum=None
    ):
        # Validate and store the temporal/accumulation context before the base
        # class builds the item index (which depends on these attributes).
        self.set_context(backward=backward_context, forward=forward_context, accumulation_context=accumulation_context)
        self.generate_depth_from_datum = generate_depth_from_datum
        # Annotation filtering only makes sense if some annotations were requested.
        self.only_annotated_datums = (only_annotated_datums if (requested_annotations or requested_autolabels) else False)
        self.transform_accumulated_box_points = transform_accumulated_box_points
        super().__init__(
            dataset_metadata,
            scenes=scenes,
            datum_names=datum_names,
            requested_annotations=requested_annotations,
            requested_autolabels=requested_autolabels,
            autolabel_root=autolabel_root,
            ignore_raw_datum=ignore_raw_datum
        )

    def _build_item_index(self):
        """Synchronize at the sample-level, keeping only sample indices whose full
        context (including accumulation context) is available.

        This is enforced by adding only sample indices that fall in the
        (backward_context, N - forward_context) range.

        Returns
        -------
        item_index: list
            List of dataset items that contain index into
            [(scene_idx, sample_within_scene_idx, [datum_names]), ...].
        """
        logging.info(f'{self.__class__.__name__} :: Building item index for {len(self.scenes)} scenes, this will take a while.')
        st = time.time()
        # The widest accumulation window across all datums enlarges the context
        # margin needed at both ends of each scene.
        (acc_back, acc_forward) = (0, 0)
        if self.accumulation_context:
            acc_context = [(v[0], v[1]) for v in self.accumulation_context.values()]
            (acc_back, acc_forward) = np.max(acc_context, 0)
        # Index each scene in parallel; per-scene work is independent.
        with Pool(cpu_count()) as proc:
            item_index = proc.starmap(
                partial(
                    _SynchronizedDataset._item_index_for_scene,
                    backward_context=(self.backward_context + acc_back),
                    forward_context=(self.forward_context + acc_forward),
                    only_annotated_datums=self.only_annotated_datums
                ), [(scene_idx, scene) for (scene_idx, scene) in enumerate(self.scenes)]
            )
        logging.info(f'Index built in {(time.time() - st):.2f}s.')
        assert (len(item_index) > 0), 'Failed to index items in the dataset.'
        # Flatten per-scene lists and drop empty entries.
        item_index = list(itertools.chain.from_iterable(item_index))
        item_index = [item for item in item_index if (item is not None)]
        item_lengths = [len(item_tup) for item_tup in item_index]
        assert all([(l == item_lengths[0]) for l in item_lengths]
                   ), ('All sample items are not of the same length, datum names might be missing.')
        return item_index

    @staticmethod
    def _item_index_for_scene(scene_idx, scene, backward_context, forward_context, only_annotated_datums):
        # Build (scene_idx, sample_idx, datum_names) tuples for one scene,
        # restricted to samples with full context; optionally require that every
        # sample in the context window carries annotations.
        st = time.time()
        logging.debug(f'Indexing scene items for {scene.scene_path}')
        if (not only_annotated_datums):
            # All samples with a complete [T-backward, T+forward] window.
            sample_range = np.arange(backward_context, (len(scene.datum_index) - forward_context))
            scene_item_index = [(scene_idx, sample_idx, scene.selected_datums) for sample_idx in sample_range]
            logging.debug(f'No annotation filter--- Scene item index built in {(time.time() - st):.2f}s.')
        else:
            sample_range = np.arange(0, len(scene.datum_index))
            # Boolean mask: True where any selected datum of the sample is annotated.
            annotated_samples = scene.annotation_index[scene.datum_index[sample_range]].any(axis=(1, 2))
            scene_item_index = []
            for idx in range(backward_context, (len(scene.datum_index) - forward_context)):
                # Keep the sample only if its whole context window is annotated.
                if all(annotated_samples.data[(idx - backward_context):((idx + 1) + forward_context)]):
                    scene_item_index.append((scene_idx, sample_range[idx], scene.selected_datums))
            logging.debug(f'Annotation filter -- Scene item index built in {(time.time() - st):.2f}s.')
        return scene_item_index

    def set_context(self, backward=1, forward=1, accumulation_context=None):
        """Set the context size and strides.

        Parameters
        ----------
        backward: int, optional
            Backward context in frames [T-backward, ..., T-1]. Default: 1.

        forward: int, optional
            Forward context in frames [T+1, ..., T+forward]. Default: 1.

        accumulation_context: dict, optional
            Dictionary of accumulation context. Default: None.
        """
        assert ((backward >= 0) and (forward >= 0)), 'Provide valid context'
        if accumulation_context:
            for (k, v) in accumulation_context.items():
                assert (v[0] >= 0), f'Provide valid accumulation backward context for {k}'
                assert (v[1] >= 0), f'Provide valid accumulation forward context for {k}'
                if (v[1] > 0):
                    logging.warning(f'Forward accumulation context is enabled for {k}. Doing so at inference time is not suggested, is this intentional?')
        self.backward_context = backward
        self.forward_context = forward
        self.accumulation_context = accumulation_context
        # Normalize datum-name keys to lowercase for case-insensitive lookup.
        if self.accumulation_context:
            self.accumulation_context = {k.lower(): v for (k, v) in self.accumulation_context.items()}

    def get_context_indices(self, sample_idx):
        """Utility to get the context sample indices given the sample_idx.

        Parameters
        ----------
        sample_idx: int
            Sample index (T).

        Returns
        -------
        context_indices: list
            Sample context indices for T, i.e. [T-1, T, T+1, T+2] if
            backward_context=1, forward_context=2.
        """
        return list(range((sample_idx - self.backward_context), ((sample_idx + self.forward_context) + 1)))

    def __len__(self):
        """Return the length of the dataset."""
        return len(self.dataset_item_index)

    def get_datum_data(self, scene_idx, sample_idx_in_scene, datum_name):
        """Get the datum at (scene_idx, sample_idx_in_scene, datum_name) with labels (optionally).

        Parameters
        ----------
        scene_idx: int
            Scene index.

        sample_idx_in_scene: int
            Sample index within the scene.

        datum_name: str
            Datum within the sample.

        Raises
        ------
        ValueError
            Raised if the type of the requested datum is unsupported.
        """
        datum = self.get_datum(scene_idx, sample_idx_in_scene, datum_name)
        # Dispatch on the protobuf oneof field to the matching loader.
        datum_type = datum.datum.WhichOneof('datum_oneof')
        if (datum_type == 'image'):
            (datum_data, annotations) = self.get_image_from_datum(scene_idx, sample_idx_in_scene, datum_name)
            if self.generate_depth_from_datum:
                # Project the configured point cloud into this camera to get depth.
                datum_data['depth'] = get_depth_from_point_cloud(
                    self, scene_idx, sample_idx_in_scene, datum_name, self.generate_depth_from_datum.lower()
                )
        elif (datum_type == 'point_cloud'):
            (datum_data, annotations) = self.get_point_cloud_from_datum(scene_idx, sample_idx_in_scene, datum_name)
        elif (datum_type == 'file_datum'):
            (datum_data, annotations) = self.get_file_meta_from_datum(scene_idx, sample_idx_in_scene, datum_name)
        elif (datum_type == 'radar_point_cloud'):
            (datum_data, annotations) = self.get_radar_point_cloud_from_datum(scene_idx, sample_idx_in_scene, datum_name)
        else:
            raise ValueError('Unknown datum type: {}'.format(datum_type))
        if annotations:
            datum_data.update(annotations)
        datum_data['datum_type'] = datum_type
        return datum_data

    def __getitem__(self, index):
        """Get the dataset item at index.

        Parameters
        ----------
        index: int
            Index of item to get.

        Returns
        -------
        data: list of list of OrderedDict

            "timestamp": int
                Timestamp of the image in microseconds.

            "datum_name": str
                Sensor name from which the data was collected.

            "rgb": PIL.Image (mode=RGB)
                Image in RGB format.

            "intrinsics": np.ndarray
                Camera intrinsics.

            "extrinsics": Pose
                Camera extrinsics with respect to the world frame.

            "pose": Pose
                Pose of sensor with respect to the world/global frame.

        Returns a list of list of OrderedDict(s).
        Outer list corresponds to temporal ordering of samples. Each element is
        a list of OrderedDict(s) corresponding to synchronized datums.

        In other words, __getitem__ returns a nested list with the ordering as
        follows: (C, D, I), where
            C = forward_context + backward_context + 1,
            D = len(datum_names),
            I = OrderedDict item.
        """
        assert (self.dataset_item_index is not None), 'Index is not built, select datums before getting elements.'
        (scene_idx, sample_idx_in_scene, datum_names) = self.dataset_item_index[index]
        datums_with_context = dict()
        for datum_name in datum_names:
            # Per-datum accumulation widens the fetched window beyond the
            # requested context; extra frames are folded back in below.
            (acc_back, acc_forward) = (0, 0)
            if self.accumulation_context:
                accumulation_context = self.accumulation_context.get(datum_name.lower(), (0, 0))
                (acc_back, acc_forward) = accumulation_context
            datum_list = [
                self.get_datum_data(scene_idx, (sample_idx_in_scene + offset), datum_name)
                for offset in range(((- 1) * (self.backward_context + acc_back)), ((self.forward_context + acc_forward) + 1))
            ]
            if ((acc_back != 0) or (acc_forward != 0)):
                assert ('point_cloud' in datum_list[0]), 'Accumulation is only defined for radar and lidar currently.'
                # Collapse each accumulation window into its center frame,
                # leaving exactly backward+forward+1 entries.
                datum_list = [
                    accumulate_points(
                        datum_list[(k - acc_back):((k + acc_forward) + 1)], datum_list[k],
                        self.transform_accumulated_box_points
                    ) for k in range(acc_back, (len(datum_list) - acc_forward))
                ]
            datums_with_context[datum_name] = datum_list
        # Transpose from per-datum lists to per-timestep lists (C, D ordering).
        context_window = []
        for t in range(((self.backward_context + self.forward_context) + 1)):
            context_window.append([datums_with_context[datum_name][t] for datum_name in datum_names])
        return context_window
class SynchronizedSceneDataset(_SynchronizedDataset):
    """Main entry-point for multi-modal dataset with sample-level
    synchronization using scene directories as input.

    Note: This class is primarily used for self-supervised learning tasks where
    the default mode of operation is learning from a collection of scene
    directories.

    Parameters
    ----------
    scene_dataset_json: str
        Full path to the scene dataset json holding collections of paths to scene json.

    split: str, default: 'train'
        Split of dataset to read ("train" | "val" | "test" | "train_overfit").

    datum_names: list, default: None
        Select list of datum names for synchronization (see self.select_datums(datum_names)).

    requested_annotations: tuple, default: None
        Tuple of annotation types, i.e. ('bounding_box_2d', 'bounding_box_3d'). Should be equivalent
        to directory containing annotation from dataset root.

    requested_autolabels: tuple[str], default: None
        Tuple of annotation types similar to `requested_annotations`, but associated with a
        particular autolabeling model. Expected format is "<model_id>/<annotation_type>".

    backward_context: int, default: 0
        Backward context in frames [T-backward, ..., T-1].

    forward_context: int, default: 0
        Forward context in frames [T+1, ..., T+forward].

    accumulation_context: dict, default: None
        Dictionary of datum names containing a tuple of (backward_context, forward_context) for
        sensor accumulation. For example, accumulation_context={'lidar': (3, 1)} accumulates lidar
        points over the past three time steps and one forward step. Only valid for lidar and radar
        datums.

    generate_depth_from_datum: str, default: None
        Datum name of the point cloud. If not None, then the depth map will be generated for the
        camera using the desired point cloud.

    only_annotated_datums: bool, default: False
        If True, only datums with annotations matching the requested annotation types are returned.

    skip_missing_data: bool, default: False
        If True, check for missing files and skip during datum index building.

    dataset_root: str
        Optional path to dataset root folder. Useful if dataset scene json is not in the same
        directory as the rest of the data.

    transform_accumulated_box_points: bool, default: False
        Flag to use cuboid pose and instance id to warp points when using lidar accumulation.

    use_diskcache: bool, default: True
        If True, cache ScenePb2 object using diskcache. If False, save the object in memory.
        NOTE: Setting use_diskcache to False would exhaust the memory if have a large number of scenes.

    autolabel_root: str, default: None
        Path to autolabels if not stored inside scene root. Note this must still respect the scene
        structure, i.e, autolabel_root = '/some-autolabels' means the autolabel scene.json is found
        at /some-autolabels/<scene-dir>/autolabels/my-model/scene.json.

    ignore_raw_datum: Optional[list[str]], default: None
        Optionally pass a list of datum types to skip loading their raw data (but still load their
        annotations). For example, ignore_raw_datum=['image'] will skip loading the image rgb data;
        the rgb key will be set to None. Useful when only annotations or extrinsics are needed.
        Allowed values are any combination of 'image', 'point_cloud', 'radar_point_cloud'.

    Refer to _SynchronizedDataset for remaining parameters.
    """
    def __init__(
        self,
        scene_dataset_json,
        split='train',
        datum_names=None,
        requested_annotations=None,
        requested_autolabels=None,
        backward_context=0,
        forward_context=0,
        accumulation_context=None,
        generate_depth_from_datum=None,
        only_annotated_datums=False,
        skip_missing_data=False,
        dataset_root=None,
        transform_accumulated_box_points=False,
        use_diskcache=True,
        autolabel_root=None,
        ignore_raw_datum=None
    ):
        # Without diskcache every ScenePb2 object stays resident in memory.
        if (not use_diskcache):
            logging.warning('Instantiating a dataset with use_diskcache=False may exhaust memory with a large dataset.')
        # Extract all scenes from the scene dataset JSON for the appropriate split.
        scenes = BaseDataset._extract_scenes_from_scene_dataset_json(
            scene_dataset_json,
            split,
            requested_autolabels,
            is_datums_synchronized=True,
            skip_missing_data=skip_missing_data,
            dataset_root=dataset_root,
            use_diskcache=use_diskcache,
            autolabel_root=autolabel_root
        )
        # Return SynchronizedDataset with scenes built from dataset.json.
        dataset_metadata = DatasetMetadata.from_scene_containers(
            scenes, requested_annotations, requested_autolabels, autolabel_root=autolabel_root
        )
        super().__init__(
            dataset_metadata,
            scenes=scenes,
            datum_names=datum_names,
            requested_annotations=requested_annotations,
            requested_autolabels=requested_autolabels,
            backward_context=backward_context,
            forward_context=forward_context,
            accumulation_context=accumulation_context,
            generate_depth_from_datum=generate_depth_from_datum,
            only_annotated_datums=only_annotated_datums,
            transform_accumulated_box_points=transform_accumulated_box_points,
            autolabel_root=autolabel_root,
            ignore_raw_datum=ignore_raw_datum
        )
class SynchronizedScene(_SynchronizedDataset):
    """Main entry-point for multi-modal dataset with sample-level
    synchronization using a single scene JSON as input.

    Note: This class can be used to introspect a single scene given a scene
    directory with its associated scene JSON.

    Parameters
    ----------
    scene_json: str
        Full path to the scene json.

    datum_names: list, default: None
        Select list of datum names for synchronization (see self.select_datums(datum_names)).

    requested_annotations: tuple, default: None
        Tuple of annotation types, i.e. ('bounding_box_2d', 'bounding_box_3d'). Should be equivalent
        to directory containing annotation from dataset root.

    requested_autolabels: tuple[str], default: None
        Tuple of annotation types similar to `requested_annotations`, but associated with a
        particular autolabeling model. Expected format is "<model_id>/<annotation_type>".

    backward_context: int, default: 0
        Backward context in frames [T-backward, ..., T-1].

    forward_context: int, default: 0
        Forward context in frames [T+1, ..., T+forward].

    accumulation_context: dict, default: None
        Dictionary of datum names containing a tuple of (backward_context, forward_context) for
        sensor accumulation. For example, accumulation_context={'lidar': (3, 1)} accumulates lidar
        points over the past three time steps and one forward step. Only valid for lidar and radar
        datums.

    generate_depth_from_datum: str, default: None
        Datum name of the point cloud. If not None, then the depth map will be generated for the
        camera using the desired point cloud.

    only_annotated_datums: bool, default: False
        If True, only datums with annotations matching the requested annotation types are returned.

    transform_accumulated_box_points: bool, default: False
        Flag to use cuboid pose and instance id to warp points when using lidar accumulation.

    use_diskcache: bool, default: True
        If True, cache ScenePb2 object using diskcache. If False, save the object in memory.
        NOTE: Setting use_diskcache to False would exhaust the memory if have a large number of scenes.

    autolabel_root: str, default: None
        Path to autolabels if not stored inside scene root. Note this must still respect the scene
        structure, i.e, autolabel_root = '/some-autolabels' means the autolabel scene.json is found
        at /some-autolabels/<scene-dir>/autolabels/my-model/scene.json.

    ignore_raw_datum: Optional[list[str]], default: None
        Optionally pass a list of datum types to skip loading their raw data (but still load their
        annotations). For example, ignore_raw_datum=['image'] will skip loading the image rgb data;
        the rgb key will be set to None. Useful when only annotations or extrinsics are needed.
        Allowed values are any combination of 'image', 'point_cloud', 'radar_point_cloud'.

    Refer to _SynchronizedDataset for remaining parameters.
    """
    def __init__(
        self,
        scene_json,
        datum_names=None,
        requested_annotations=None,
        requested_autolabels=None,
        backward_context=0,
        forward_context=0,
        accumulation_context=None,
        generate_depth_from_datum=None,
        only_annotated_datums=False,
        transform_accumulated_box_points=False,
        use_diskcache=True,
        autolabel_root=None,
        ignore_raw_datum=None
    ):
        # Without diskcache every ScenePb2 object stays resident in memory.
        if (not use_diskcache):
            logging.warning('Instantiating a dataset with use_diskcache=False may exhaust memory with a large dataset.')
        # Extract a single scene from the scene JSON.
        scene = BaseDataset._extract_scene_from_scene_json(
            scene_json,
            requested_autolabels,
            is_datums_synchronized=True,
            use_diskcache=use_diskcache,
            autolabel_root=autolabel_root
        )
        # Return SynchronizedDataset with a single scene.
        dataset_metadata = DatasetMetadata.from_scene_containers(
            [scene], requested_annotations, requested_autolabels, autolabel_root=autolabel_root
        )
        super().__init__(
            dataset_metadata,
            scenes=[scene],
            datum_names=datum_names,
            requested_annotations=requested_annotations,
            requested_autolabels=requested_autolabels,
            backward_context=backward_context,
            forward_context=forward_context,
            accumulation_context=accumulation_context,
            generate_depth_from_datum=generate_depth_from_datum,
            only_annotated_datums=only_annotated_datums,
            transform_accumulated_box_points=transform_accumulated_box_points,
            autolabel_root=autolabel_root,
            ignore_raw_datum=ignore_raw_datum
        )
class FeatureOntology():
    """Feature ontology object. At bare minimum, we expect ontologies to provide:
        ID: (int) identifier for feature field name
        Name: (str) string identifier for feature field name

    Based on the task, additional fields may be populated. Refer to `dataset.proto` and
    `ontology.proto` specifications for more details. Can be constructed from file or from
    deserialized proto object.

    Parameters
    ----------
    feature_ontology_pb2: OntologyPb2
        Deserialized ontology object.
    """
    # Reserved id/name for the "void" feature class.
    VOID_ID = 255
    VOID_CLASS = 'Void'

    def __init__(self, feature_ontology_pb2):
        self._ontology = feature_ontology_pb2
        if isinstance(self._ontology, FeatureOntologyPb2):
            # Build sorted bidirectional lookups between names, ids, and value types.
            self._name_to_id = OrderedDict(
                sorted([(ontology_item.name, ontology_item.id) for ontology_item in self._ontology.items])
            )
            self._id_to_name = OrderedDict(
                sorted([(ontology_item.id, ontology_item.name) for ontology_item in self._ontology.items])
            )
            self._id_to_feature_value_type = OrderedDict(
                sorted([(ontology_item.id, ontology_item.feature_value_type) for ontology_item in self._ontology.items])
            )
        else:
            raise TypeError('Unexpected type {}, expected FeatureOntologyV2'.format(type(self._ontology)))
        self._feature_ids = sorted(self._id_to_name.keys())
        self._feature_names = [self._id_to_name[c_id] for c_id in self._feature_ids]

    @classmethod
    def load(cls, ontology_file):
        """Construct an ontology from an ontology JSON.

        Parameters
        ----------
        ontology_file: str
            Path to ontology JSON.

        Raises
        ------
        FileNotFoundError
            Raised if ontology_file does not exist.
        TypeError
            Raised if we could not read an ontology out of the ontology file.
        """
        if os.path.exists(ontology_file):
            feature_ontology_pb2 = open_feature_ontology_pbobject(ontology_file)
        else:
            raise FileNotFoundError('Could not find {}'.format(ontology_file))
        if (feature_ontology_pb2 is not None):
            return cls(feature_ontology_pb2)
        raise TypeError('Could not open ontology {}'.format(ontology_file))

    def to_proto(self):
        """Serialize ontology. Only supports exporting in OntologyV2.

        Returns
        -------
        OntologyPb2
            Serialized ontology.
        """
        return FeatureOntologyPb2(
            items=[
                FeatureOntologyItem(
                    name=name, id=feature_id, feature_value_type=self.id_to_feature_value_type[feature_id]
                ) for (feature_id, name) in self._id_to_name.items()
            ]
        )

    def save(self, save_dir):
        """Write out ontology items to `<sha>.json`. SHA generated from Ontology proto object.

        Parameters
        ----------
        save_dir: str
            Directory in which to save serialized ontology.

        Returns
        -------
        output_ontology_file: str
            Path to serialized ontology file.
        """
        os.makedirs(save_dir, exist_ok=True)
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    @property
    def num_classes(self):
        # Number of feature ids in the ontology.
        return len(self._feature_ids)

    @property
    def class_names(self):
        # Feature names, ordered by their sorted ids.
        return self._feature_names

    @property
    def class_ids(self):
        # Sorted feature ids.
        return self._feature_ids

    @property
    def name_to_id(self):
        return self._name_to_id

    @property
    def id_to_name(self):
        return self._id_to_name

    @property
    def id_to_feature_value_type(self):
        return self._id_to_feature_value_type

    @property
    def hexdigest(self):
        """Hash object"""
        return generate_uid_from_pbobject(self.to_proto())

    def __eq__(self, other):
        # Ontologies are equal iff their serialized protos hash identically.
        return (self.hexdigest == other.hexdigest)

    def __repr__(self):
        return '{}[{}]'.format(self.__class__.__name__, os.path.basename(self.hexdigest))
class AgentFeatureOntology(FeatureOntology):
    """Agent feature ontologies derive directly from FeatureOntology (no overrides)."""
def tqdm(*args, **kwargs):
    """Drop-in wrapper around tqdm whose `disable` flag is forced to the
    module-level DGP_DISABLE_TQDM toggle (overriding any caller value)."""
    kwargs.update(disable=DGP_DISABLE_TQDM)
    return _tqdm(*args, **kwargs)
def points_in_cuboid(query_points, cuboid):
    """Tests if a point is contained by a cuboid. Assumes points are in the same frame as the
    cuboid, i.e, cuboid.pose translates points in the cuboid local frame to the query_point frame.

    Parameters
    ----------
    query_points: np.ndarray
        Numpy array shape (N,3) of points.

    cuboid: dgp.utils.structures.bounding_box_3d.BoundingBox3D
        Cuboid in same reference frame as query points.

    Returns
    -------
    in_view: np.ndarray
        Numpy boolean array shape (N,) where a True entry denotes a point inside the cuboid.
    """
    corners = cuboid.corners
    origin = corners[0]
    # Three edges emanating from corner 0 span the box.
    edges = (corners[1] - origin, corners[3] - origin, corners[4] - origin)
    extents = [np.linalg.norm(edge) for edge in edges]
    axes = [edge / extent for (edge, extent) in zip(edges, extents)]
    # A point is inside iff its projection onto each unit edge axis lies in [0, extent].
    rel = query_points - origin
    conditions = []
    for (axis, extent) in zip(axes, extents):
        proj = np.dot(rel, axis)
        conditions.append(proj >= 0)
        conditions.append(proj <= extent)
    return np.logical_and.reduce(conditions)
def accumulate_points(point_datums, target_datum, transform_boxes=False):
    """Accumulates lidar or radar points by transforming all datums in point_datums to the frame
    used in target_datum.

    Parameters
    ----------
    point_datums: list[dict]
        List of datum dictionaries to accumulate.

    target_datum: dict
        A single datum to use as the reference frame and reference time.

    transform_boxes: bool, optional
        Flag to denote if cuboid annotations and instance ids should be used to warp points to the
        target frame. Only valid for Lidar. Default: False.

    Returns
    -------
    p_target: dict
        A new datum with accumulated points and an additional field 'accumulation_offset_t'
        that indicates the delta in microseconds between the target_datum and the given point.
    """
    assert ('point_cloud' in point_datums[0]), 'Accumulation is only defined for radar and lidar currently.'
    # Work on a copy so the caller's target datum is never mutated.
    p_target = deepcopy(target_datum)
    # World -> target-sensor transform.
    pose_target_w = p_target['pose'].inverse()
    new_fields = defaultdict(list)
    target_boxes = {}
    if (transform_boxes and ('bounding_box_3d' in target_datum)):
        # Index the target's cuboids by instance id so source-frame boxes can be matched.
        target_boxes = {box.instance_id: box for box in target_datum['bounding_box_3d']}
    for (_, p) in enumerate(point_datums):
        # Transform taking points from datum p's sensor frame into the target sensor frame.
        pose_p2p1 = (pose_target_w * p['pose'])
        new_points = (pose_p2p1 * p['point_cloud'])
        new_fields['point_cloud'].append(new_points)
        if ('velocity' in p_target):
            # Velocities rotate (no translation): transform point+velocity, subtract the
            # transformed point.
            new_vel = ((pose_p2p1 * (p['point_cloud'] + p['velocity'])) - new_points)
            new_fields['velocity'].append(new_vel)
        if ('covariance' in p_target):
            # Covariance transforms as R C R^T under rotation R.
            R = pose_p2p1.rotation_matrix
            new_cov = ((R @ p['covariance']) @ R.T)
            new_fields['covariance'].append(new_cov)
        if ('extra_channels' in p_target):
            # Extra channels are frame-independent; carried over unchanged.
            new_fields['extra_channels'].append(p['extra_channels'])
        # Time offset of this datum relative to the target, one entry per point.
        # NOTE(review): units presumably microseconds, per the docstring — determined by
        # the 'timestamp' producer upstream.
        dt = (p_target['timestamp'] - p['timestamp'])
        new_fields['accumulation_offset_t'].append((dt * np.ones(len(new_points))))
        if (transform_boxes and ('bounding_box_3d' in p)):
            if ('velocity' in p):
                # Presence of 'velocity' marks a radar datum: box warping is skipped for radar.
                continue
            for _box in p['bounding_box_3d']:
                # Only warp points for instances that also exist in the target frame.
                if (_box.instance_id not in target_boxes):
                    continue
                box = deepcopy(_box)
                # Express the source cuboid's pose in the target sensor frame.
                box._pose = (pose_p2p1 * box.pose)
                in_box = points_in_cuboid(new_fields['point_cloud'][(- 1)], box)
                if np.any(in_box):
                    # Move points rigidly with the object: source-box frame -> target-box frame.
                    points_to_move = new_fields['point_cloud'][(- 1)][in_box]
                    pose_p2box1 = (target_boxes[box.instance_id].pose * box.pose.inverse())
                    moved_points = (pose_p2box1 * points_to_move)
                    # In-place update of the just-appended chunk.
                    new_fields['point_cloud'][(- 1)][in_box] = moved_points
    # Concatenate all per-datum chunks into the output datum.
    for k in new_fields:
        p_target[k] = np.concatenate(new_fields[k], axis=0)
    return p_target
def is_empty_annotation(annotation_file, annotation_type):
    """Check if JSON style annotation files are empty.

    Parameters
    ----------
    annotation_file: str
        Path to JSON file containing annotations for 2D/3D bounding boxes.

    annotation_type: object
        Protobuf pb2 object we want to load into.

    Returns
    -------
    bool:
        True if empty annotation, otherwise False.
    """
    # NOTE(review): the handle `_f` is never read — open_pbobject re-opens the path
    # itself. The explicit open surfaces missing-file errors eagerly; confirm before
    # removing it.
    with open(annotation_file, encoding=locale.getpreferredencoding()) as _f:
        annotations = open_pbobject(annotation_file, annotation_type)
    return not list(annotations.annotations)
@diskcache(protocol='npz')
def get_depth_from_point_cloud(dataset, scene_idx, sample_idx_in_scene, cam_datum_name, pc_datum_name):
    """Generate the depth map in the camera view using the provided point cloud
    datum within the sample. Results are disk-cached (npz) via @diskcache.

    Parameters
    ----------
    dataset: dgp.dataset.BaseDataset
        Inherited base dataset to augment with depth data.

    scene_idx: int
        Index of the scene.

    sample_idx_in_scene: int
        Index of the sample within the scene at scene_idx.

    cam_datum_name: str
        Name of camera datum within the sample.

    pc_datum_name: str
        Name of the point cloud datum within the sample.

    Returns
    -------
    depth: np.ndarray
        Depth map from the camera's viewpoint.
    """
    # Fetch the point cloud and verify the datum really is a point cloud.
    pc_datum = dataset.get_datum(scene_idx, sample_idx_in_scene, pc_datum_name)
    pc_datum_type = pc_datum.datum.WhichOneof('datum_oneof')
    assert (pc_datum_type == 'point_cloud'), 'Depth cannot be generated from {} {} {}'.format(
        pc_datum_type, pc_datum_name, pc_datum
    )
    (pc_datum_data, _) = dataset.get_point_cloud_from_datum(scene_idx, sample_idx_in_scene, pc_datum_name)
    # Lift the points from the sensor frame into the world frame.
    X_W = (pc_datum_data['pose'] * pc_datum_data['point_cloud'])
    # Fetch the camera datum and verify it is an image.
    cam_datum = dataset.get_datum(scene_idx, sample_idx_in_scene, cam_datum_name)
    cam_datum_type = cam_datum.datum.WhichOneof('datum_oneof')
    assert (cam_datum_type == 'image'), 'Depth cannot be projected onto {} '.format(cam_datum_type)
    (cam_datum_data, _) = dataset.get_image_from_datum(scene_idx, sample_idx_in_scene, cam_datum_name)
    # Camera pose is world-from-camera; the Camera helper wants camera-from-world.
    p_WC = cam_datum_data['pose']
    camera = Camera(K=cam_datum_data['intrinsics'], p_cw=p_WC.inverse())
    # PIL .size is (width, height); generate_depth_map takes (H, W).
    (W, H) = cam_datum_data['rgb'].size[:2]
    return generate_depth_map(camera, X_W, (H, W))
def clear_cache(directory=DGP_CACHE_DIR):
    """Clear DGP cache to avoid growing disk-usage.

    Parameters
    ----------
    directory: str, optional
        A pathname to the directory used by DGP for caching. Default: DGP_CACHE_DIR.
    """
    # Nothing to do if the cache directory was never created.
    if not os.path.isdir(directory):
        return
    logging.info('Clearing dgp disk-cache.')
    try:
        shutil.rmtree(directory)
    except OSError as e:
        # Best-effort cleanup: a partially-removed cache is not fatal.
        logging.warning('Failed to clear cache {}'.format(e))
def diskcache(protocol='npz', cache_dir=None):
    """Disk-caching method/function decorator that caches results into
    dgp cache for arbitrary pickle-able / numpy objects.

    Parameters
    ----------
    protocol: str, optional
        Serialization protocol. Choices: {npz, pkl}. Default: "npz" (numpy).

    cache_dir: str, optional
        Directory to cache instead of the default DGP cache. Default: None.
    """
    assert protocol in ('npz', 'pkl'), 'Unknown protocol {}'.format(protocol)
    logging.info('Using dgp disk-cache.')
    if cache_dir is None:
        cache_dir = DGP_CACHE_DIR
    os.makedirs(cache_dir, exist_ok=True)

    def wrapped_diskcache(func):
        def serialize(_result, _filename, _protocol):
            """Serialize result based on protocol"""
            if _protocol == 'npz':
                # _filename already ends in '.npz', so numpy appends no extra suffix.
                np.savez_compressed(_filename, data=_result)
            elif _protocol == 'pkl':
                # Fix: compare against the `_protocol` parameter. The original
                # compared the closed-over `protocol`, which only worked
                # because callers happened to pass the same value through.
                with open(_filename, 'wb') as f:
                    pickle.dump(_result, f)
            else:
                raise ValueError('Unknown serialization protocol {}'.format(_protocol))

        def deserialize(_filename, _protocol):
            """De-serialize result based on protocol"""
            if _protocol == 'npz':
                return np.load(_filename)['data']
            elif _protocol == 'pkl':
                with open(_filename, 'rb') as f:
                    return pickle.load(f)
            else:
                raise ValueError('Unknown de-serialization protocol {}'.format(_protocol))

        @wraps(func)
        def wrapped_func(*args, **kwargs):
            # Key the cache entry on the full call signature plus function name.
            try:
                data = pickle.dumps((args, kwargs, func.__name__))
                h = hashlib.md5(data)
            except Exception as e:
                raise Exception('Failed to hash: (args={}, kwargs={}): {}'.format(args, kwargs, str(e))) from e
            filename = os.path.join(cache_dir, '{}.{}'.format(h.hexdigest(), protocol))
            try:
                if os.path.exists(filename):
                    logging.info('Attempting to load disk-cached object {} [{:.2f} MiB] .'.format(
                        filename, ((os.stat(filename).st_size / 1024) / 1024)))
                    result = deserialize(filename, protocol)
                    logging.info('Successfully loaded disk-cached object ({} at {}) .'.format(
                        result.__class__.__name__, filename))
                    return result
            except Exception:
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit propagate. A corrupt/unreadable cache entry falls
                # through to a fresh computation below.
                logging.info('Failed to load cached object {}'.format(filename))
            result = func(*args, **kwargs)
            serialize(result, filename, protocol)
            return result

        return wrapped_func

    return wrapped_diskcache
def add_options(**kwargs):
    """Decorator function to add a list of Click options to a Click subcommand.

    Parameters
    ----------
    **kwargs: dict
        The `options` keyword argument shall be a list of string options to extend a Click CLI.
    """
    def _add_options(func):
        # Fold the option decorators over the command, left to right, so the
        # last option in the list ends up as the outermost wrapper.
        decorated = func
        for option in kwargs['options']:
            decorated = option(decorated)
        return decorated

    return _add_options
def init_s3_client(use_ssl=False):
    """Initiate S3 AWS client.

    Parameters
    ----------
    use_ssl: bool, optional
        Use secure sockets layer. Provides better security to s3, but
        can fail intermittently in a multithreaded environment. Default: False.

    Returns
    -------
    service: boto3.client
        S3 resource service client.
    """
    global S3_CLIENT_SSL
    global S3_CLIENT_NO_SSL
    if use_ssl:
        # Lazily create and memoize the SSL client in the module-level global.
        if S3_CLIENT_SSL is None:
            S3_CLIENT_SSL = boto3.client('s3')
        return S3_CLIENT_SSL
    # Consistency fix: use the same `is None` sentinel check as the SSL branch
    # (the previous truthiness test would re-create the client for any falsy
    # cached value, not just the uninitialized None).
    if S3_CLIENT_NO_SSL is None:
        S3_CLIENT_NO_SSL = boto3.client('s3', use_ssl=False)
    return S3_CLIENT_NO_SSL
def s3_recursive_list(s3_prefix):
    """List all files contained in an s3 location recursively and also return their md5_sums
    NOTE: this is different from 'aws s3 ls' in that it will not return directories, but instead
    the full paths to the files contained in any directories (which is what s3 is actually tracking)

    Parameters
    ----------
    s3_prefix: str
        s3 prefix which we want the returned files to have

    Returns
    -------
    all_files: list[str]
        List of files (with full path including 's3://...')

    md5_sums: list[str]
        md5 sum for each of the files as returned by boto3 'ETag' field
    """
    assert s3_prefix.startswith('s3://')
    (bucket_name, prefix) = convert_uri_to_bucket_path(s3_prefix, strip_trailing_slash=False)
    s3_client = init_s3_client(use_ssl=False)
    # Fix: paginate directly instead of issuing a redundant pre-flight
    # list_objects_v2 request just to test for emptiness. Pages for an empty
    # prefix carry no 'Contents' key, so `.get` handles the empty case too.
    paginator = s3_client.get_paginator('list_objects_v2').paginate(Bucket=bucket_name, Prefix=prefix)
    s3_metadata = [single_object for page in paginator for single_object in page.get('Contents', [])]
    all_files = [os.path.join('s3://', bucket_name, _file_metadata['Key']) for _file_metadata in s3_metadata]
    # ETag values are quoted by S3; strip the quotes to expose the raw md5.
    md5_sums = [_file_metadata['ETag'].strip('"') for _file_metadata in s3_metadata]
    return (all_files, md5_sums)
def return_last_value(retry_state):
    """Return the result of the last call attempt.

    Parameters
    ----------
    retry_state: tenacity.RetryCallState
        Retry-state metadata for a flaky call.
    """
    # The outcome of the final attempt carries the value to surface to callers.
    last_attempt = retry_state.outcome
    return last_attempt.result()
def is_false(value):
    """Identity-check that ``value`` is the boolean ``False``.

    Only the ``False`` singleton matches; other falsy values
    (``0``, ``None``, ``''``) do not.
    """
    return value is False
@tenacity.retry(stop=tenacity.stop_after_attempt(3), retry=tenacity.retry_if_result(is_false), retry_error_callback=return_last_value)
def s3_copy(source_path, target_path, verbose=True):
    """Copy single file from local to s3, s3 to local, or s3 to s3.

    Retried up to 3 times (via tenacity) whenever it returns False.

    Parameters
    ----------
    source_path: str
        Path of file to copy

    target_path: str
        Path to copy file to

    verbose: bool, optional
        If True print some helpful messages. Default: True.

    Returns
    -------
    bool: True if successful
    """
    if verbose:
        # NOTE(review): this mutates the *root* logger level as a side effect;
        # preserved for backward compatibility with existing callers.
        logging.getLogger().setLevel(logging.DEBUG)
    success = False
    # Security/robustness fix: invoke the AWS CLI with an argument list and no
    # shell, so paths containing spaces or shell metacharacters cannot be
    # misinterpreted or injected into a shell command line.
    command = ['aws', 's3', 'cp', '--acl', 'bucket-owner-full-control', source_path, target_path]
    command_str = ' '.join(command)
    logging.debug("Copying file with '{}'".format(command_str))
    try:
        subprocess.check_output(command)
        success = True
    except subprocess.CalledProcessError as e:
        success = False
        logging.error('{} failed with error code {}'.format(command_str, e.returncode))
        logging.error(e.output)
    if verbose:
        logging.info('Done copying file')
    return success
def parallel_s3_copy(source_paths, target_paths, threadpool_size=None):
    """Copy files from local to s3, s3 to local, or s3 to s3 using a threadpool.
    Retry the operation if any files fail to copy. Throw an AssertionError if it fails the 2nd time.

    Parameters
    ----------
    source_paths: List of str
        Full paths of files to copy.

    target_paths: List of str
        Full paths to copy files to.

    threadpool_size: int
        Number of threads to use to fetch these files. If not specified, will default to
        number of cores on the machine.

    Raises
    ------
    AssertionError
        If any file still fails to copy on the serial second attempt.
    """
    if threadpool_size is None:
        threadpool_size = cpu_count()
    s3_and_destination = zip(source_paths, target_paths)
    with Pool(threadpool_size) as thread_pool:
        # Fix: pass s3_copy directly — functools.partial(s3_copy) with no
        # bound arguments was a no-op wrapper.
        success_list = thread_pool.starmap(s3_copy, s3_and_destination)
    num_success = sum(success_list)
    logging.info(f'{num_success} / {len(success_list)} files copied successfully.')
    # Serially retry each failure once; s3_copy itself already retries 3x.
    for (success, source, target) in zip(success_list, source_paths, target_paths):
        if not success:
            # NOTE(review): `assert` is stripped under `python -O`; kept as-is
            # because callers may depend on AssertionError being raised.
            assert s3_copy(source, target), f'Failed to copy {source} to {target} on 2nd try'