Datasets:

DOI:
License:
ToF-360 / assets /pointcloud_eval /preprocess_tof_360.py
kanayamaHideaki's picture
Add semantics, instances, layout_eval, preprocessing and modifying README.md.
324d4da
"""
Preprocessing Script for ToF-360
Author: Mahdi Chamseddine (mahdi.chamseddine@dfki.de)
Please cite our work if the code is helpful to you.
"""
from pathlib import Path
import cv2
import numpy as np
import open3d as o3d
# Raw ToF-360 semantic label -> evaluation class id.
# Raw labels 17 (beam) and 26 (board) are intentionally unmapped and fall
# through to the clutter class, matching the disabled cases in the original.
_LABEL_REMAP = {
    0: -1, 33: -1, 34: -1,  # <UNK>
    2: 0, 20: 0, 42: 0,     # ceiling
    3: 1, 18: 1,            # floor
    1: 2, 40: 2,            # wall
    14: 4,                  # column
    8: 5,                   # window
    7: 6,                   # door
    12: 7,                  # table
    5: 8,                   # chair
    4: 9,                   # sofa
    31: 10,                 # bookcase
}


def map_label(label: int) -> int:
    """Remap a raw semantic label to its evaluation class id.

    Unknown labels (0/33/34) map to -1; any raw label without an explicit
    entry maps to the clutter class (12).
    """
    return _LABEL_REMAP.get(label, 12)
def downsample(xyz: np.ndarray, voxel_size: float = 0.01) -> np.ndarray:
    """Voxel-grid downsample a point cloud, keeping one point per voxel.

    Builds an Open3D point cloud, traces which original points land in each
    voxel, and picks one surviving index per voxel at random.

    :param xyz: (N, 3) array of point coordinates
    :param voxel_size: edge length of the voxel grid cells
    :return: 1-D array of indices into ``xyz`` (one per occupied voxel)
    """
    bounds_lo = xyz.min(axis=0)
    bounds_hi = xyz.max(axis=0)
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz)
    _, _, traces = cloud.voxel_down_sample_and_trace(
        voxel_size, bounds_lo, bounds_hi
    )
    # NOTE(review): the per-voxel pick is random; seed numpy upstream if
    # reproducible output is required.
    chosen = [np.random.choice(voxel_members) for voxel_members in traces]
    return np.asarray(chosen)
def preprocess_scans(area_path: Path) -> None:
    """Parse every XYZ scan file found under ``area_path/XYZ``.

    Scan files are named ``<scan>_XYZ.npy``; the trailing 4-character
    ``_XYZ`` suffix is stripped to recover the scan name.
    """
    for npy_file in (area_path / "XYZ").glob("*.npy*"):
        parse_scan(npy_file.stem[:-4], area_path)
def parse_scan(scan_name: str, area_path: Path, debug: bool = False):
    """Convert one raw scan into preprocessed coord/color/normal/segment arrays.

    Loads the XYZ, RGB, semantic, and normal files for ``scan_name`` under
    ``area_path``, converts millimetres to metres, rotates the axes
    ``(x, y, z) -> (x, z, -y)``, remaps semantic labels, voxel-downsamples,
    and saves float32/int32 ``.npy`` files under
    ``<root>/preprocessed/<split>/<area>_<scan>``.

    :param scan_name: base scan name (without the ``_XYZ``/``_rgb``/... suffix)
    :param area_path: area directory containing XYZ/RGB/semantics/normal folders
    :param debug: unused; kept for interface compatibility
    :raises FileNotFoundError: if an expected image cannot be read
    """
    output_name = area_path.stem + "_" + scan_name
    print(f"Parsing scan: {output_name}", flush=True)
    processed_path = (
        area_path.parent.parent / "preprocessed" / area_path.parent.stem / output_name
    )
    processed_path.mkdir(parents=True, exist_ok=True)

    print(f"--- [{output_name}] reading point cloud", flush=True)
    xyz_path = area_path / "XYZ" / (scan_name + "_XYZ.npy")
    temp = np.load(xyz_path).reshape(-1, 3) / 1000  # mm to m
    # Rotate axes: (x, y, z) -> (x, z, -y).
    coord = temp.copy()
    coord[:, 1] = temp[:, 2]
    coord[:, 2] = -temp[:, 1]

    png_path = area_path / "RGB" / (scan_name + "_rgb.png")
    # cv2.imread requires a string path; Path objects fail on many OpenCV builds.
    color = cv2.imread(str(png_path.resolve()))
    if color is None:
        # imread returns None on failure instead of raising; fail loudly here
        # rather than with a cryptic error inside cvtColor.
        raise FileNotFoundError(f"Could not read RGB image: {png_path}")
    color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB).reshape(-1, 3) / 255

    print(f"--- [{output_name}] loading labels", flush=True)
    semantic_path = area_path / "semantics" / (scan_name + "_semantic.npy")
    segment = np.vectorize(map_label)(np.load(semantic_path).reshape(-1))

    normal_path = area_path / "normal" / (scan_name + "_normal.png")
    temp = cv2.imread(str(normal_path.resolve()))
    if temp is None:
        raise FileNotFoundError(f"Could not read normal image: {normal_path}")
    # Map pixel values [0, 255] -> [-1, 1], then apply the same axis rotation
    # that was applied to the coordinates.
    temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB).reshape(-1, 3) * 2 / 255
    temp = temp - 1
    normal = temp.copy()
    normal[:, 1] = temp[:, 2]
    normal[:, 2] = -temp[:, 1]

    print(f"--- [{output_name}] down sampling", flush=True)
    idx = downsample(coord)

    print(f"--- [{output_name}] saving", flush=True)
    coord = np.ascontiguousarray(coord[idx, :], dtype=np.float32)
    np.save(processed_path / "coord.npy", coord)
    color = np.ascontiguousarray(color[idx, :], dtype=np.float32)
    np.save(processed_path / "color.npy", color)
    normal = np.ascontiguousarray(normal[idx, :], dtype=np.float32)
    np.save(processed_path / "normal.npy", normal)
    segment = np.ascontiguousarray(segment[idx], dtype=np.int32)
    np.save(processed_path / "segment.npy", segment)
def main():
    """Preprocess every configured area found under the dataset root."""
    dataset_root = Path("path/to/ToF-360/")
    target_areas = {"Hospital", "Office_Room_1", "Office_Room_2", "Parking_Lot"}
    for split in [""]:
        split_path = dataset_root / split
        # Bail out early if the dataset root/split is missing.
        if not split_path.is_dir():
            print(
                f"Error: '{split_path.resolve()}' is not a valid directory.",
                flush=True,
            )
            return
        for candidate in split_path.iterdir():
            if candidate.is_dir() and candidate.stem in target_areas:
                preprocess_scans(candidate)
# Script entry point: run preprocessing over all configured areas.
if __name__ == "__main__":
    main()