Datasets:

DOI:
License:
File size: 4,437 Bytes
324d4da
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
"""
Preprocessing Script for ToF-360

Author: Mahdi Chamseddine (mahdi.chamseddine@dfki.de)
Please cite our work if the code is helpful to you.
"""
from pathlib import Path

import cv2
import numpy as np
import open3d as o3d


def map_label(label: int) -> int:
    """Remap a raw ToF-360 semantic label to the training class index.

    Raw labels tagged <UNK> map to -1 (ignore); any raw label not listed
    is treated as clutter (12). The beam (3) and board (11) classes are
    currently disabled.
    """
    raw_to_class = {
        0: -1, 33: -1, 34: -1,  # <UNK>
        2: 0, 20: 0, 42: 0,     # ceiling
        3: 1, 18: 1,            # floor
        1: 2, 40: 2,            # wall
        # 17: 3,                # beam (disabled)
        14: 4,                  # column
        8: 5,                   # window
        7: 6,                   # door
        12: 7,                  # table
        5: 8,                   # chair
        4: 9,                   # sofa
        31: 10,                 # bookcase
        # 26: 11,               # board (disabled)
    }
    return raw_to_class.get(label, 12)  # default: clutter


def downsample(xyz: np.ndarray, voxel_size: float = 0.01) -> np.ndarray:
    """Voxel-grid downsample: return one random point index per occupied voxel.

    Args:
        xyz: (N, 3) array of point coordinates.
        voxel_size: Edge length of each voxel, in the same units as xyz.

    Returns:
        1-D array of indices into xyz, one per voxel (random representative).
    """
    lower_bound = xyz.min(axis=0)
    upper_bound = xyz.max(axis=0)
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz)
    # Trace which original points fell into each voxel.
    _, _, per_voxel_points = cloud.voxel_down_sample_and_trace(
        voxel_size, lower_bound, upper_bound
    )
    # Keep a single randomly chosen representative per voxel.
    kept = [np.random.choice(candidates) for candidates in per_voxel_points]
    return np.asarray(kept)


def preprocess_scans(area_path: Path) -> None:
    """Parse every raw scan found under area_path/XYZ."""
    for xyz_file in (area_path / "XYZ").glob("*.npy*"):
        # File stems end in "_XYZ"; strip that suffix to get the scan name
        # ("_xxx" is just a 4-character placeholder of the same length).
        base_name = xyz_file.stem[: -len("_xxx")]
        parse_scan(base_name, area_path)


def parse_scan(scan_name: str, area_path: Path, debug: bool = False):
    """Convert one raw ToF-360 scan into preprocessed .npy arrays.

    Loads the raw XYZ grid, RGB image, semantic labels and normal map for
    `scan_name` under `area_path`, converts units and axes, remaps labels,
    voxel-downsamples, and writes coord/color/normal/segment .npy files to
    <root>/preprocessed/<split>/<area>_<scan_name>/.

    Args:
        scan_name: Base scan name (without the "_XYZ"/"_rgb"/... suffix).
        area_path: Area directory containing XYZ/, RGB/, semantics/, normal/.
        debug: Unused; kept for interface compatibility.

    Raises:
        FileNotFoundError: If the RGB or normal image cannot be read.
    """
    output_name = area_path.stem + "_" + scan_name
    print(f"Parsing scan: {output_name}", flush=True)
    processed_path = (
        area_path.parent.parent / "preprocessed" / area_path.parent.stem / output_name
    )
    # Uncomment to skip scans that were already processed:
    # if processed_path.exists():
    #     return
    processed_path.mkdir(parents=True, exist_ok=True)

    print(f"--- [{output_name}] reading point cloud", flush=True)
    xyz_path = Path(area_path / "XYZ", scan_name + "_XYZ.npy")
    temp = np.load(xyz_path)

    temp = temp.reshape(-1, 3) / 1000  # mm to m
    # Axis swap: sensor (x, y, z) -> (x, z, -y).
    coord = temp.copy()
    coord[:, 1] = temp[:, 2]
    coord[:, 2] = -temp[:, 1]

    png_path = Path(area_path / "RGB", scan_name + "_rgb.png")
    # cv2.imread needs a string path (older OpenCV rejects pathlib.Path),
    # and returns None instead of raising when the file cannot be read.
    color = cv2.imread(str(png_path.resolve()))
    if color is None:
        raise FileNotFoundError(f"Could not read RGB image: {png_path}")
    color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB).reshape(-1, 3) / 255

    print(f"--- [{output_name}] loading labels", flush=True)
    semantic_path = Path(area_path / "semantics", scan_name + "_semantic.npy")
    segment = np.load(semantic_path).reshape(-1)
    segment = np.vectorize(map_label)(segment)

    normal_path = Path(area_path / "normal", scan_name + "_normal.png")
    temp = cv2.imread(str(normal_path.resolve()))
    if temp is None:
        raise FileNotFoundError(f"Could not read normal image: {normal_path}")
    # Decode normals from [0, 255] to [-1, 1], then apply the same axis swap.
    temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB).reshape(-1, 3) * 2 / 255
    temp = temp - 1
    normal = temp.copy()
    normal[:, 1] = temp[:, 2]
    normal[:, 2] = -temp[:, 1]

    print(f"--- [{output_name}] down sampling", flush=True)
    idx = downsample(coord)

    print(f"--- [{output_name}] saving", flush=True)
    coord = np.ascontiguousarray(coord[idx, :], dtype=np.float32)
    np.save(Path(processed_path, "coord.npy"), coord)
    color = np.ascontiguousarray(color[idx, :], dtype=np.float32)
    np.save(Path(processed_path, "color.npy"), color)
    normal = np.ascontiguousarray(normal[idx, :], dtype=np.float32)
    np.save(Path(processed_path, "normal.npy"), normal)
    segment = np.ascontiguousarray(segment[idx], dtype=np.int32)
    np.save(Path(processed_path, "segment.npy"), segment)


def main():
    """Preprocess all known areas under each configured dataset split."""
    # splits = ["test", "train", "val"]
    # splits = ["train"]
    splits = [""]
    dataset_directory = "path/to/ToF-360/"

    areas = ["Hospital", "Office_Room_1", "Office_Room_2", "Parking_Lot"]

    for split in splits:
        split_path = Path(dataset_directory + split)
        # Bail out entirely if the split directory is missing.
        if not split_path.is_dir():
            print(
                f"Error: '{split_path.resolve()}' is not a valid directory.",
                flush=True,
            )
            return
        for candidate in split_path.iterdir():
            if candidate.is_dir() and candidate.stem in areas:
                preprocess_scans(candidate)


if __name__ == "__main__":
    main()