| import pickle |
| from pathlib import Path |
| import torch |
| import cv2 |
| import numpy as np |
| import os |
| import open3d as o3d |
| from tqdm import tqdm |
|
|
def process_scene(scene_params, target_depth_shape=None):
    """Convert a raw DUSt3R scene dict into a list of per-frame records.

    Parameters
    ----------
    scene_params : dict
        Must provide the keys read below: "image_files", "poses", "depths",
        "Ks", "pts3d", "im_conf", "imshapes".
    target_depth_shape : tuple[int, int] | None
        (H, W) to resize each depth map to. Defaults to the on-disk image
        resolution of the first frame.

    Returns
    -------
    list[dict]
        One record per frame: image path, pose, resized depth, the source
        intrinsics, and intrinsics rescaled for both the full-resolution
        image ("image_K") and the resized depth map ("depth_K").
    """
    images = scene_params["image_files"]
    poses = scene_params["poses"]
    depths = scene_params["depths"]
    Ks = scene_params["Ks"]
    pts3d = scene_params["pts3d"]
    im_confs = scene_params["im_conf"]
    im_shapes = scene_params["imshapes"]
    # All frames are assumed to share the reconstruction resolution of
    # frame 0 -- TODO confirm this holds for every scene.
    im_shape = im_shapes[0]

    # Element-wise scale matrix mapping intrinsics from the reconstruction
    # resolution to the full on-disk image resolution: row 0 carries
    # fx/skew/cx (width ratio), row 1 carries fy/cy (height ratio).
    image_hw = cv2.imread(images[0]).shape[:2]
    image_scale = np.ones((3, 3))
    image_scale[0] *= image_hw[1] / im_shape[1]
    image_scale[1] *= image_hw[0] / im_shape[0]

    if target_depth_shape is None:
        target_depth_shape = image_hw

    # Same idea, but targeting the (possibly different) depth resolution.
    depth_scale = np.ones((3, 3))
    depth_scale[0] *= target_depth_shape[1] / im_shape[1]
    depth_scale[1] *= target_depth_shape[0] / im_shape[0]

    # cv2.resize expects dsize as a (W, H) tuple; target_depth_shape is
    # (H, W) and may be a list if supplied by the caller.
    dsize = tuple(target_depth_shape[::-1])
    return [
        {
            "image_path": image,
            "pose": pose,
            "depth": cv2.resize(depth.numpy(), dsize, interpolation=cv2.INTER_LINEAR),
            "source_K": K,
            "image_K": K * image_scale,
            "depth_K": K * depth_scale,
            "pts3d": pts,
            "im_conf": im_conf,
            "im_shape_target": image_hw,
            "depth_shape_target": target_depth_shape,
            "shape_original": im_shape,
        }
        for image, pose, depth, K, pts, im_conf in zip(images, poses, depths, Ks, pts3d, im_confs)
    ]
|
|
|
|
def export_scene(scene_id, scene_params, processing_args):
    """Export one DUSt3R scene to a ScanNet-style directory layout.

    Writes, under ``processing_args["out_dir"] / scene_id``:
    color symlinks, 16-bit depth PNGs (millimeters), per-frame pose text
    files, shared 4x4 intrinsic/extrinsic matrices, and a
    confidence-filtered, voxel-downsampled point cloud named
    ``<scene_id>_vh_clean_2.ply``.

    Parameters
    ----------
    scene_id : str
        Scene identifier; used as the output subdirectory name.
    scene_params : dict
        Raw scene dict; see ``process_scene`` for the required keys.
    processing_args : dict
        Needs "out_dir" (Path), "confidence_threshold", "voxel_size".
    """
    data = process_scene(scene_params)
    out_path = processing_args["out_dir"] / scene_id

    def proc_k(K):
        # Embed the 3x3 intrinsics into a 4x4 matrix (ScanNet convention).
        res = np.eye(4)
        res[:3, :3] = K[:3, :3]
        return res

    K_color = proc_k(data[0]["image_K"])
    K_depth = proc_k(data[0]["depth_K"])
    intrinsics_path = out_path / "intrinsic"
    intrinsics_path.mkdir(parents=True, exist_ok=True)
    np.savetxt(intrinsics_path / "intrinsic_color.txt", K_color)
    np.savetxt(intrinsics_path / "intrinsic_depth.txt", K_depth)
    # Identity extrinsics: color and depth share the same camera frame.
    np.savetxt(intrinsics_path / "extrinsic_color.txt", np.eye(4))
    np.savetxt(intrinsics_path / "extrinsic_depth.txt", np.eye(4))

    all_pts = []
    all_colors = []
    for item in data:
        img_name = Path(item["image_path"]).stem

        # Link the original image into the output tree instead of copying.
        image_path = out_path / "color" / f"{img_name}.jpg"
        image_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            os.symlink(item["image_path"], image_path)
        except FileExistsError:
            pass  # re-export over an existing tree: keep the old link

        image = cv2.imread(item["image_path"])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Resize back to the reconstruction resolution so pixels align with
        # pts3d / im_conf; normalize to [0, 1] for the point-cloud colors.
        # (cv2.resize wants dsize as a (W, H) tuple.)
        image = cv2.resize(image, tuple(item["shape_original"][::-1])) / 255.

        depth_path = out_path / "depth" / f"{img_name}.png"
        depth_path.parent.mkdir(parents=True, exist_ok=True)
        # cv2.imwrite requires a str filename (a pathlib.Path raises a
        # TypeError in the OpenCV binding). Depth is stored as uint16 mm.
        cv2.imwrite(str(depth_path), (item["depth"] * 1000).astype(np.uint16))

        pose_path = out_path / "pose" / f"{img_name}.txt"
        pose_path.parent.mkdir(parents=True, exist_ok=True)
        np.savetxt(pose_path, item["pose"])

        # Keep only confidently-reconstructed pixels (mask computed once).
        mask = item["im_conf"] > processing_args["confidence_threshold"]
        pts = item["pts3d"][mask]
        image = image[mask]

        all_pts.append(pts.view(-1, 3))
        all_colors.append(image.reshape(-1, 3))

    all_pts = np.concatenate(all_pts, axis=0)
    all_colors = np.concatenate(all_colors, axis=0)

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(all_pts)
    pcd.colors = o3d.utility.Vector3dVector(all_colors)
    pcd = pcd.voxel_down_sample(voxel_size=processing_args["voxel_size"])
    # write_point_cloud expects a str filename, not a pathlib.Path.
    o3d.io.write_point_cloud(str(out_path / f"{scene_id}_vh_clean_2.ply"), pcd)
|
|
| |
| |
| |
| |
|
|
# ---- Export configuration -------------------------------------------------
processing_args = {
    "confidence_threshold": 1,
    "voxel_size": 0.025,
    "out_dir": Path("data/arkit_dust3r_posed/processed"),
}

# Offline validation-split metadata for ARKitScenes.
val_path = Path("../") / "OKNO/data/arkitscenes/arkitscenes_offline_infos_val.pkl"
out_dir = Path("data/arkit_dust3r_posed/processed")

# NOTE: pickle.load runs arbitrary code from the file -- only safe because
# this metadata file is produced locally by a trusted pipeline.
with open(val_path, "rb") as f:
    data = pickle.load(f)

data_list = data["data_list"]
# One lidar path per validation scene; the scene id is derived from it below.
val_scenes = [entry["lidar_points"]["lidar_path"] for entry in data_list]
def extract_name(item):
    """Return the scene id: everything before the first underscore."""
    return item.partition("_")[0]
# Map raw lidar paths to bare scene ids.
val_scenes = [extract_name(scene) for scene in val_scenes]

# Root of the per-scene DUSt3R reconstruction results.
dut3r_path = Path("/home/jovyan/users/lemeshko/Indoor/DUSt3R/res/arkit_posed")

for scene in tqdm(val_scenes):
    # Each scene directory holds a single serialized parameter dict.
    scene_params = torch.load(dut3r_path / scene / "scene_params.pt")
    export_scene(scene, scene_params, processing_args)