XieNet / main.py
import argparse
import json
import shutil
from pathlib import Path
import cv2
import numpy as np
import sapien
from scipy.spatial.transform import Rotation as R
from tqdm import tqdm
def set_seed(seed: int):
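    """Seed Python's and NumPy's global random number generators."""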
import random
random.seed(seed)
np.random.seed(seed)
def gen_camera_pose(latitudes: np.ndarray, longitudes: np.ndarray, radius: float = 2.0):
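    """Sample 4x4 camera-to-world poses on a sphere of the given radius.

    Latitudes/longitudes are in degrees, with latitude measured from the +z
    axis. Every camera looks at the origin; the rotation columns are ordered
    (forward, left, up), matching SAPIEN's x-forward camera convention.
    """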
# (latitudes, longitudes)
angles = np.stack(
np.meshgrid(latitudes, longitudes, indexing="ij"), axis=-1
).reshape(-1, 2)
angles = np.deg2rad(angles)
xs = np.sin(angles[..., 0]) * np.cos(angles[..., 1]) * radius
ys = np.sin(angles[..., 0]) * np.sin(angles[..., 1]) * radius
zs = np.cos(angles[..., 0]) * radius
trans = np.stack([xs, ys, zs], axis=-1)
forwards = -trans / np.linalg.norm(trans, ord=2, axis=-1, keepdims=True)
lefts = np.cross([0.0, 0.0, 1.0], forwards)
lefts = lefts / np.linalg.norm(lefts, ord=2, axis=-1, keepdims=True)
ups = np.cross(forwards, lefts)
ups = ups / np.linalg.norm(ups, ord=2, axis=-1, keepdims=True)
rots = np.stack([forwards, lefts, ups], axis=2)
camera_poses = np.eye(4).reshape(1, 4, 4).repeat(angles.shape[0], axis=0)
camera_poses[:, :3, :3] = rots
camera_poses[:, :3, 3] = trans
return camera_poses
def get_obj_urdf_file(data_root_dir: str, include_objs: list[str]):
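    """Collect the annotation-URDF path of every object in xienet_ids.txt
    whose category is in include_objs."""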
data_root_dir = Path(data_root_dir).resolve()
obj_urdf_files = []
    with open(data_root_dir / "xienet_ids.txt", "r") as f:
        for line in f:
            obj, obj_id = line.strip().split(" ")
            if obj in include_objs:
                obj_urdf_file = (
                    data_root_dir / obj_id / "mobility_annotation_gapartnet.urdf"
                )
                obj_urdf_files.append(obj_urdf_file)
return obj_urdf_files
def setup_sapien(
render_width: int,
render_height: int,
fovy: float,
near: float,
far: float,
enable_rt: bool = False,
):
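    """Create a zero-gravity SAPIEN scene with basic lighting and a camera
    using the given resolution and frustum; optionally enable ray tracing."""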
sapien.render.set_log_level("error")
if enable_rt:
sapien.render.set_camera_shader_dir("rt")
sapien.render.set_ray_tracing_samples_per_pixel(64)
sapien.render.set_ray_tracing_path_depth(16)
sapien.render.set_ray_tracing_denoiser("optix")
scene_config = sapien.physx.PhysxSceneConfig()
scene_config.gravity = np.array([0.0, 0.0, 0.0])
sapien.physx.set_scene_config(scene_config)
scene = sapien.Scene()
scene.set_timestep(1.0 / 100)
scene.set_ambient_light([0.5, 0.5, 0.5])
scene.add_directional_light([0.0, 1.0, -1.0], [0.5, 0.5, 0.5], shadow=True)
camera = scene.add_camera(
name="camera",
width=render_width,
height=render_height,
fovy=np.deg2rad(fovy),
near=near,
far=far,
)
return scene, camera
def gen_articulation_flow(
active_joints: dict,
camera_extrinsics: np.ndarray,
pcd_camera: np.ndarray,
mask_movable: np.ndarray,
max_flow_dist: float = 0.1,
):
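    """Compute per-point 3D articulation flow in the camera frame.

    For each active joint, points on its movable links are displaced:
    revolute joints rotate them about the joint axis (scaled so the point
    farthest from the axis moves by max_flow_dist), prismatic joints
    translate them by max_flow_dist along the axis. Returns None when no
    point receives any flow.
    """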
flow = np.zeros_like(pcd_camera)
for joint_id, active_joint in active_joints.items():
valid_mask = mask_movable == joint_id
if not np.any(valid_mask):
continue
axis = active_joint["axis"] @ camera_extrinsics[:3, :3].T
origin = (
active_joint["origin"] @ camera_extrinsics[:3, :3].T
+ camera_extrinsics[:3, 3]
)
if active_joint["type"] in ["revolute", "revolute_unwrapped"]:
points_to_origin = pcd_camera[valid_mask] - origin
dist_points_to_axis = np.linalg.norm(
np.cross(points_to_origin, axis, axis=-1), ord=2, axis=-1
)
if dist_points_to_axis.size == 0:
continue
max_dist = np.max(dist_points_to_axis)
if max_dist == 0.0:
continue
            # A point at radius r sweeps a chord of length 2 * r * sin(theta / 2),
            # so the farthest point moves by max_flow_dist when
            # theta = 2 * arcsin(max_flow_dist / (2 * max_dist)).
            angle = 2 * np.arcsin(max_flow_dist / (2.0 * max_dist))
rot = R.from_rotvec((angle * axis).reshape(3)).as_matrix()
flow[valid_mask] = (
(pcd_camera[valid_mask] - origin) @ rot.T
+ origin
- pcd_camera[valid_mask]
)
elif active_joint["type"] == "prismatic":
flow[valid_mask] = axis * max_flow_dist
flow[np.isnan(flow)] = 0.0
    if not np.any(flow):
return None
else:
return flow
def save_data(save_dir: Path, data: dict, save_vis: bool = False):
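    """Save the foreground points as a structured .npy plus the camera pose
    and intrinsics; points with all-zero flow are cleared from mask_movable.
    """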
save_dir.mkdir(parents=True, exist_ok=True)
fg_mask = data["fg_mask"]
data["mask_movable"][np.all(data["articulation_flow"] == 0.0, axis=-1)] = 0
num_points = np.count_nonzero(fg_mask)
npy_data = np.empty(
(num_points,),
dtype=np.dtype(
[
("point", np.float32, (3,)),
("rgb", np.uint8, (3,)),
("articulation_flow", np.float32, (3,)),
("mask_holdable", np.uint8, (1,)),
("mask_movable", np.uint8, (1,)),
("mask_ground", np.uint8, (1,)),
]
),
)
npy_data["point"] = data["pcd_camera"][fg_mask]
npy_data["rgb"] = data["rgb"][fg_mask]
npy_data["articulation_flow"] = data["articulation_flow"][fg_mask]
npy_data["mask_holdable"] = data["mask_holdable"][fg_mask][..., None]
npy_data["mask_movable"] = data["mask_movable"][fg_mask][..., None]
npy_data["mask_ground"] = data["mask_ground"][fg_mask][..., None]
np.save(save_dir / "pcd_camera.npy", npy_data)
extrinsics = np.eye(4)
extrinsics[:3] = data["camera_extrinsics"][:3]
camera_pose = np.linalg.inv(extrinsics)
np.savetxt(save_dir / "camera_pose.txt", camera_pose, fmt="%.6f", delimiter=",")
np.savetxt(
save_dir / "camera_intrinsics.txt",
data["camera_intrinsics"],
fmt="%.6f",
delimiter=",",
)
if save_vis:
vis_save_dir = save_dir / "vis"
save_data_vis(vis_save_dir, data)
def save_data_vis(vis_save_dir: Path, data: dict):
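    """Save debug images: color, colormapped depth, the label masks, and
    articulation-flow magnitude/direction maps."""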
vis_save_dir.mkdir(parents=True, exist_ok=True)
color = cv2.cvtColor(data["rgb"], cv2.COLOR_RGB2BGR)
cv2.imwrite(vis_save_dir / "color.png", color)
depth = data["pcd_camera"][..., 2]
depth_vis = 255 - (depth / np.max(depth) * 255).astype(np.uint8)
depth_vis = cv2.applyColorMap(depth_vis, cv2.COLORMAP_JET)
cv2.imwrite(vis_save_dir / "depth.png", depth_vis)
mask_holdable_vis = data["mask_holdable"]
cv2.imwrite(vis_save_dir / "mask_holdable.png", mask_holdable_vis)
mask_movable_vis = data["mask_movable"]
cv2.imwrite(vis_save_dir / "mask_movable.png", mask_movable_vis)
mask_ground_vis = data["mask_ground"]
cv2.imwrite(vis_save_dir / "mask_ground.png", mask_ground_vis)
articulation_flow = data["articulation_flow"]
articulation_flow_dist = np.linalg.norm(articulation_flow, ord=2, axis=-1)
articulation_flow_dist_vis = (
(articulation_flow_dist / np.max(articulation_flow_dist)) * 255
).astype(np.uint8)
cv2.imwrite(vis_save_dir / "articulation_flow_dist.png", articulation_flow_dist_vis)
articulation_flow_dir = articulation_flow / (
np.clip(articulation_flow_dist[..., None], a_min=1e-6, a_max=None)
)
articulation_flow_dir_vis = ((articulation_flow_dir + 1) / 2 * 255).astype(np.uint8)
articulation_flow_dir_vis = cv2.cvtColor(
articulation_flow_dir_vis, cv2.COLOR_RGB2BGR
)
cv2.imwrite(vis_save_dir / "articulation_flow_dir.png", articulation_flow_dir_vis)
def load_obj_urdf(urdf_file: str, scene: sapien.Scene, disable_collision: bool = False):
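    """Load an articulated object, optionally reassigning its collision
    groups so it collides with nothing, and set every movable joint to its
    lower limit."""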
loader = scene.create_urdf_loader()
obj = loader.load(urdf_file)
if disable_collision:
for link in obj.get_links():
for collision_shape in link.collision_shapes:
collision_shape.set_collision_groups([0, 0, 0xFFFFFFFF, 1])
obj.set_pose(sapien.Pose())
obj_qpos = []
for joint in obj.get_joints():
if joint.type not in ["prismatic", "revolute", "revolute_unwrapped"]:
continue
        joint_limit = joint.get_limits()
        if joint_limit.size == 0 or not np.isfinite(joint_limit[0, 0]):
            # Unlimited (e.g. continuous) joints report no finite lower bound.
            obj_qpos.append(0.0)
        else:
            obj_qpos.append(joint_limit[0, 0])
obj.set_qpos(obj_qpos)
scene.step()
return obj
def render_obj_data(
scene: sapien.Scene,
camera: sapien.render.RenderCameraComponent,
camera_poses: np.ndarray,
obj_urdf_file: Path,
save_dir: Path,
include_holdable_links: list[str],
min_movable_area: int = 100,
max_flow_dist: float = 0.01,
save_vis: bool = False,
):
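    """Render one object from every camera pose and save per-view data.

    A first pass on the annotation URDF extracts segmentation masks, a
    camera-frame point cloud, and articulation flow; a second pass swaps in
    the textured URDF to render RGB. Views without a large-enough movable
    region or without valid flow are skipped.
    """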
link_annos_file = obj_urdf_file.parent / "link_annotation_gapartnet.json"
    with open(link_annos_file, "r") as f:
        link_annos: list[dict] = json.load(f)
active_links = []
for link_anno in link_annos:
if link_anno["is_gapart"] and link_anno["category"] in include_holdable_links:
active_links.append(link_anno["link_name"])
if len(active_links) == 0:
return
obj = load_obj_urdf(obj_urdf_file.as_posix(), scene, disable_collision=True)
z_min = np.inf
for link in obj.get_links():
if len(link.collision_shapes) == 0:
continue
aabb = link.compute_global_aabb_tight()
if aabb[0, 2] < z_min:
z_min = aabb[0, 2]
ground = scene.add_ground(z_min - 0.001)
vis_ids_ground = [ground.get_per_scene_id()]
scene.step()
vis_ids_holdable_dict = {}
vis_ids_movable_dict = {}
active_joints = {}
num_holdable_links = 0
num_movable_links = 0
for link in obj.get_links():
if link.name not in active_links:
continue
num_holdable_links += 1
vis_ids_holdable_dict[num_holdable_links] = link.entity.per_scene_id
parent_link = link
vis_ids_movable = []
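        # Walk up the kinematic chain from the holdable link to the first
        # non-fixed joint; every link visited on the way moves with that joint.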
while True:
vis_ids_movable.append(parent_link.entity.per_scene_id)
joint = parent_link.get_joint()
if joint.type != "fixed":
num_movable_links += 1
joint_pose = joint.get_global_pose().to_transformation_matrix()
origin = joint_pose[:3, 3]
axis = (
joint_pose[:3, :3] @ np.array([1.0, 0.0, 0.0]).reshape(3, 1)
).reshape(3)
axis = axis / np.linalg.norm(axis, ord=2)
active_joints[num_movable_links] = {
"name": joint.name,
"type": joint.type,
"origin": origin.reshape(1, 3),
"axis": axis.reshape(1, 3),
}
vis_ids_movable_dict[num_movable_links] = vis_ids_movable
break
parent_link = joint.get_parent_link()
if parent_link is None:
break
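    # SAPIEN's "Position" picture lives in the OpenGL camera frame (x right,
    # y up, z backward); flipping y and z converts it to OpenCV's convention.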
rot_opengl2opencv = np.array(
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], dtype=np.float32
)
data_dict = {}
for i, camera_pose in enumerate(camera_poses):
camera.set_entity_pose(sapien.Pose(camera_pose))
scene.step()
scene.update_render()
camera.take_picture()
mask = camera.get_picture("Segmentation")[..., 1] # (H, W) uint32
fg_mask = mask > 0
mask_holdable = np.zeros(mask.shape, dtype=np.uint8)
mask_movable = np.zeros(mask.shape, dtype=np.uint8)
mask_ground = np.isin(mask, vis_ids_ground).astype(np.uint8)
for link_id, vis_ids in vis_ids_holdable_dict.items():
link_mask = np.isin(mask, vis_ids)
if np.sum(link_mask) > 0:
mask_holdable[link_mask] = link_id
for link_id, vis_ids in vis_ids_movable_dict.items():
link_mask = np.isin(mask, vis_ids)
if np.sum(link_mask) >= min_movable_area:
mask_movable[link_mask] = link_id
if not np.any(mask_movable > 0):
continue
pcd_camera = camera.get_picture("Position")[..., :3] # (H, W, 3) float32
pcd_camera = pcd_camera @ rot_opengl2opencv.T
pcd_camera[~fg_mask] = 0.0
camera_extrinsics = camera.get_extrinsic_matrix()
articulation_flow = gen_articulation_flow(
active_joints, camera_extrinsics, pcd_camera, mask_movable, max_flow_dist
)
if articulation_flow is None:
continue
intrinsics = camera.get_intrinsic_matrix() # (3, 3)
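        # Half-pixel adjustment of the principal point (pixel-center vs.
        # pixel-corner convention).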
intrinsics[0, 2] -= 0.5
intrinsics[1, 2] -= 0.5
data_dict[i] = {
"camera_pose_sapien": camera_pose,
"camera_extrinsics": camera_extrinsics,
"camera_intrinsics": intrinsics,
"fg_mask": fg_mask,
"mask_holdable": mask_holdable,
"mask_movable": mask_movable,
"mask_ground": mask_ground,
"pcd_camera": pcd_camera,
"articulation_flow": articulation_flow,
}
scene.remove_articulation(obj)
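    # Second pass: swap in the textured URDF and re-render RGB from the same
    # camera poses (the annotation URDF only provides masks and geometry).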
obj = load_obj_urdf(
obj_urdf_file.as_posix().replace(
"mobility_annotation_gapartnet", "mobility_texture_gapartnet"
),
scene,
disable_collision=True,
)
for i in data_dict.keys():
camera_pose = data_dict[i]["camera_pose_sapien"]
camera.set_entity_pose(sapien.Pose(camera_pose))
scene.step()
scene.update_render()
camera.take_picture()
rgb = camera.get_picture("Color")[..., :3] # (H, W, 3) float32
rgb = np.clip(rgb * 255.0, 0.0, 255.0).astype(np.uint8)
data_dict[i]["rgb"] = rgb
scene.remove_articulation(obj)
scene.remove_actor(ground)
for i, data in data_dict.items():
save_data(save_dir / obj_urdf_file.parts[-2] / f"view_{i:04d}", data, save_vis)
def main(args):
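    """Render every included object category and save per-view data under
    args.save_dir; already-rendered objects are skipped."""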
set_seed(args.seed)
save_dir = Path(args.save_dir).resolve()
save_dir.mkdir(parents=True, exist_ok=True)
include_objs = [
"Dishwasher",
"Door",
"Microwave",
"Oven",
"Refrigerator",
"Safe",
"StorageFurniture",
"Table",
"Toilet",
"TrashCan",
"WashingMachine",
]
include_holdable_links = [
"line_fixed_handle",
"round_fixed_handle",
"hinge_handle",
]
shutil.copy2(Path(args.data_root_dir) / "xienet_ids.txt", save_dir)
obj_urdf_files = get_obj_urdf_file(args.data_root_dir, include_objs)
scene, camera = setup_sapien(
args.render_width,
args.render_height,
args.fovy,
args.near,
args.far,
args.enable_rt,
)
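    # Viewpoints: a 6 x 12 (latitude x longitude) grid on a 2 m sphere,
    # latitudes measured from the +z axis.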
latitudes = np.linspace(40.0, 70.0, num=6, endpoint=True)
longitudes = np.linspace(150.0, 210.0, num=12, endpoint=True)
camera_poses = gen_camera_pose(latitudes, longitudes, radius=2.0)
pbar = tqdm(obj_urdf_files, desc="Rendering", dynamic_ncols=True)
for obj_urdf_file in pbar:
pbar.set_postfix({"obj": obj_urdf_file.parts[-2]})
if (save_dir / obj_urdf_file.parts[-2]).exists():
continue
try:
render_obj_data(
scene,
camera,
camera_poses,
obj_urdf_file,
save_dir,
include_holdable_links,
args.min_movable_area,
args.max_flow_dist,
args.save_vis,
)
except Exception as e:
print(f"Failed to render {obj_urdf_file}: {e}")
shutil.rmtree(save_dir / obj_urdf_file.parts[-2], ignore_errors=True)
continue
except KeyboardInterrupt:
print("Rendering interrupted.")
shutil.rmtree(save_dir / obj_urdf_file.parts[-2], ignore_errors=True)
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--data_root_dir", type=str, required=True)
parser.add_argument("--save_dir", type=str, required=True)
parser.add_argument("--render_width", type=int, default=640)
parser.add_argument("--render_height", type=int, default=576)
parser.add_argument("--fovy", type=float, default=65.0)
parser.add_argument("--near", type=float, default=0.01)
parser.add_argument("--far", type=float, default=4.0)
parser.add_argument("--enable_rt", action="store_true", default=False)
parser.add_argument("--min_movable_area", type=int, default=4096)
parser.add_argument("--max_flow_dist", type=float, default=0.1)
parser.add_argument("--save_vis", action="store_true", default=True)
args = parser.parse_args()
main(args)