Spaces:
Running
on
Zero
Running
on
Zero
xinjie.wang
committed on
Commit
Β·
8fff686
1
Parent(s):
3f32df5
update
Browse files- common.py +1 -1
- embodied_gen/data/utils.py +1 -1
- embodied_gen/models/sam3d.py +4 -6
- embodied_gen/models/text_model.py +1 -1
- embodied_gen/scripts/gen_room.py +69 -0
- embodied_gen/scripts/imageto3d.py +10 -2
- embodied_gen/scripts/parallel_sim.py +1 -1
- embodied_gen/scripts/room_gen/custom_solve.gin +29 -0
- embodied_gen/scripts/room_gen/export_scene.py +1516 -0
- embodied_gen/scripts/room_gen/gen_room.py +253 -0
- embodied_gen/scripts/room_gen/route_room.py +135 -0
- embodied_gen/scripts/room_gen/run_generate_indoors.py +31 -0
- embodied_gen/scripts/room_gen/visualize_floorplan.py +1186 -0
- embodied_gen/trainer/pono2mesh_trainer.py +1 -1
- embodied_gen/utils/gpt_clients.py +2 -1
- embodied_gen/utils/inference.py +19 -1
- embodied_gen/utils/monkey_patch/infinigen.py +781 -0
- embodied_gen/utils/monkey_patch/maniskill.py +88 -0
- embodied_gen/utils/monkey_patch/pano2room.py +159 -0
- embodied_gen/utils/monkey_patch/sam3d.py +385 -0
- embodied_gen/utils/monkey_patch/trellis.py +83 -0
- embodied_gen/utils/simulation.py +48 -10
- embodied_gen/utils/tags.py +1 -1
- embodied_gen/validators/urdf_convertor.py +15 -8
common.py
CHANGED
|
@@ -15,7 +15,7 @@
|
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
import spaces
|
| 18 |
-
from embodied_gen.utils.
|
| 19 |
|
| 20 |
monkey_path_trellis()
|
| 21 |
|
|
|
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
import spaces
|
| 18 |
+
from embodied_gen.utils.monkey_patch.trellis import monkey_path_trellis
|
| 19 |
|
| 20 |
monkey_path_trellis()
|
| 21 |
|
embodied_gen/data/utils.py
CHANGED
|
@@ -963,6 +963,6 @@ def model_device_ctx(
|
|
| 963 |
|
| 964 |
if verbose:
|
| 965 |
model_names = [m.__class__.__name__ for m in models]
|
| 966 |
-
logger.
|
| 967 |
f"[model_device_ctx] {model_names} to cuda: {to_cuda_time:.1f}s, to cpu: {to_cpu_time:.1f}s"
|
| 968 |
)
|
|
|
|
| 963 |
|
| 964 |
if verbose:
|
| 965 |
model_names = [m.__class__.__name__ for m in models]
|
| 966 |
+
logger.info(
|
| 967 |
f"[model_device_ctx] {model_names} to cuda: {to_cuda_time:.1f}s, to cpu: {to_cpu_time:.1f}s"
|
| 968 |
)
|
embodied_gen/models/sam3d.py
CHANGED
|
@@ -14,7 +14,7 @@
|
|
| 14 |
# implied. See the License for the specific language governing
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
-
from embodied_gen.utils.
|
| 18 |
|
| 19 |
monkey_patch_sam3d()
|
| 20 |
import os
|
|
@@ -22,8 +22,7 @@ import sys
|
|
| 22 |
|
| 23 |
import numpy as np
|
| 24 |
from hydra.utils import instantiate
|
| 25 |
-
|
| 26 |
-
from huggingface_hub import snapshot_download
|
| 27 |
from omegaconf import OmegaConf
|
| 28 |
from PIL import Image
|
| 29 |
|
|
@@ -31,7 +30,7 @@ current_file_path = os.path.abspath(__file__)
|
|
| 31 |
current_dir = os.path.dirname(current_file_path)
|
| 32 |
sys.path.append(os.path.join(current_dir, "../.."))
|
| 33 |
from loguru import logger
|
| 34 |
-
from
|
| 35 |
InferencePipelinePointMap,
|
| 36 |
)
|
| 37 |
|
|
@@ -66,8 +65,7 @@ class Sam3dInference:
|
|
| 66 |
self, local_dir: str = "weights/sam-3d-objects", compile: bool = False
|
| 67 |
) -> None:
|
| 68 |
if not os.path.exists(local_dir):
|
| 69 |
-
|
| 70 |
-
snapshot_download("jetjodh/sam-3d-objects", local_dir=local_dir)
|
| 71 |
config_file = os.path.join(local_dir, "checkpoints/pipeline.yaml")
|
| 72 |
config = OmegaConf.load(config_file)
|
| 73 |
config.rendering_engine = "nvdiffrast"
|
|
|
|
| 14 |
# implied. See the License for the specific language governing
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
+
from embodied_gen.utils.monkey_patch.sam3d import monkey_patch_sam3d
|
| 18 |
|
| 19 |
monkey_patch_sam3d()
|
| 20 |
import os
|
|
|
|
| 22 |
|
| 23 |
import numpy as np
|
| 24 |
from hydra.utils import instantiate
|
| 25 |
+
from modelscope import snapshot_download
|
|
|
|
| 26 |
from omegaconf import OmegaConf
|
| 27 |
from PIL import Image
|
| 28 |
|
|
|
|
| 30 |
current_dir = os.path.dirname(current_file_path)
|
| 31 |
sys.path.append(os.path.join(current_dir, "../.."))
|
| 32 |
from loguru import logger
|
| 33 |
+
from sam3d_objects.pipeline.inference_pipeline_pointmap import (
|
| 34 |
InferencePipelinePointMap,
|
| 35 |
)
|
| 36 |
|
|
|
|
| 65 |
self, local_dir: str = "weights/sam-3d-objects", compile: bool = False
|
| 66 |
) -> None:
|
| 67 |
if not os.path.exists(local_dir):
|
| 68 |
+
snapshot_download("facebook/sam-3d-objects", local_dir=local_dir)
|
|
|
|
| 69 |
config_file = os.path.join(local_dir, "checkpoints/pipeline.yaml")
|
| 70 |
config = OmegaConf.load(config_file)
|
| 71 |
config.rendering_engine = "nvdiffrast"
|
embodied_gen/models/text_model.py
CHANGED
|
@@ -53,7 +53,7 @@ __all__ = [
|
|
| 53 |
]
|
| 54 |
|
| 55 |
PROMPT_APPEND = (
|
| 56 |
-
"Angled 3D view of one {object}, centered, no cropping, no occlusion, isolated product photo, "
|
| 57 |
"no surroundings, high-quality appearance, vivid colors, on a plain clean surface, 3D style revealing multiple surfaces"
|
| 58 |
)
|
| 59 |
PROMPT_KAPPEND = "Single {object}, in the center of the image, white background, 3D style, best quality"
|
|
|
|
| 53 |
]
|
| 54 |
|
| 55 |
PROMPT_APPEND = (
|
| 56 |
+
"Angled 3D view of one {object}, centered, no cropping, no occlusion, isolated product photo, placed horizontally, "
|
| 57 |
"no surroundings, high-quality appearance, vivid colors, on a plain clean surface, 3D style revealing multiple surfaces"
|
| 58 |
)
|
| 59 |
PROMPT_KAPPEND = "Single {object}, in the center of the image, white background, 3D style, best quality"
|
embodied_gen/scripts/gen_room.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import signal
|
| 19 |
+
import subprocess
|
| 20 |
+
import sys
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
|
| 23 |
+
from embodied_gen.utils.log import logger
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def get_blender_python_path():
    """Locate Blender's bundled Python interpreter.

    Checks the ``BLENDER_PYTHON_BIN`` environment variable first, then a
    known relative install location; exits the process when neither exists.

    Returns:
        str: Absolute (or env-provided) path to the interpreter binary.
    """
    candidate = os.environ.get("BLENDER_PYTHON_BIN")
    if candidate and os.path.exists(candidate):
        return candidate

    fallback = Path("thirdparty/infinigen/blender/4.2/python/bin/python3.11")
    if fallback.exists():
        return str(fallback.resolve())

    logger.error("Error: Could not find Blender Python binary.")
    logger.error("Please set the BLENDER_PYTHON_BIN environment variable.")
    sys.exit(1)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def entrypoint():
    """Run the requested script under Blender's bundled Python interpreter.

    Forwards all CLI arguments to a child process started in its own
    session, mirrors the child's exit code, and kills the whole process
    group when interrupted or on unexpected errors.
    """
    interpreter = get_blender_python_path()
    forwarded = sys.argv[1:]
    child = subprocess.Popen([interpreter, *forwarded], start_new_session=True)

    def _kill_group():
        # Kill the child's entire session; caller handles a vanished child.
        os.killpg(os.getpgid(child.pid), signal.SIGKILL)

    try:
        sys.exit(child.wait())

    except KeyboardInterrupt:
        try:
            _kill_group()
        except ProcessLookupError:
            pass
        logger.error("\nProgram interrupted by user (Cmd+C). Exiting.")
        sys.exit(130)
    except Exception as e:
        logger.error(f"Error: {e}")
        try:
            _kill_group()
        except Exception:
            pass
        sys.exit(1)


if __name__ == "__main__":
    entrypoint()
|
embodied_gen/scripts/imageto3d.py
CHANGED
|
@@ -105,7 +105,7 @@ def parse_args():
|
|
| 105 |
parser.add_argument(
|
| 106 |
"--n_retry",
|
| 107 |
type=int,
|
| 108 |
-
default=
|
| 109 |
)
|
| 110 |
parser.add_argument("--disable_decompose_convex", action="store_true")
|
| 111 |
parser.add_argument("--texture_size", type=int, default=2048)
|
|
@@ -153,6 +153,7 @@ def entrypoint(**kwargs):
|
|
| 153 |
|
| 154 |
seed = args.seed
|
| 155 |
asset_node = "unknown"
|
|
|
|
| 156 |
if isinstance(args.asset_type, list) and args.asset_type[idx]:
|
| 157 |
asset_node = args.asset_type[idx]
|
| 158 |
for try_idx in range(args.n_retry):
|
|
@@ -163,7 +164,10 @@ def entrypoint(**kwargs):
|
|
| 163 |
outputs = image3d_model_infer(PIPELINE, seg_image, seed)
|
| 164 |
except Exception as e:
|
| 165 |
logger.error(
|
| 166 |
-
f"[
|
|
|
|
|
|
|
|
|
|
| 167 |
)
|
| 168 |
continue
|
| 169 |
|
|
@@ -208,6 +212,10 @@ def entrypoint(**kwargs):
|
|
| 208 |
|
| 209 |
seed = random.randint(0, 100000) if seed is not None else None
|
| 210 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 211 |
# Render the video for generated 3D asset.
|
| 212 |
color_images = render_video(gs_model, r=1.85)["color"]
|
| 213 |
normal_images = render_video(mesh_model, r=1.85)["normal"]
|
|
|
|
| 105 |
parser.add_argument(
|
| 106 |
"--n_retry",
|
| 107 |
type=int,
|
| 108 |
+
default=3,
|
| 109 |
)
|
| 110 |
parser.add_argument("--disable_decompose_convex", action="store_true")
|
| 111 |
parser.add_argument("--texture_size", type=int, default=2048)
|
|
|
|
| 153 |
|
| 154 |
seed = args.seed
|
| 155 |
asset_node = "unknown"
|
| 156 |
+
gs_model = None
|
| 157 |
if isinstance(args.asset_type, list) and args.asset_type[idx]:
|
| 158 |
asset_node = args.asset_type[idx]
|
| 159 |
for try_idx in range(args.n_retry):
|
|
|
|
| 164 |
outputs = image3d_model_infer(PIPELINE, seg_image, seed)
|
| 165 |
except Exception as e:
|
| 166 |
logger.error(
|
| 167 |
+
f"[Image3D Failed] process {image_path}: {e}, retry: {try_idx+1}/{args.n_retry}"
|
| 168 |
+
)
|
| 169 |
+
seed = (
|
| 170 |
+
random.randint(0, 100000) if seed is not None else None
|
| 171 |
)
|
| 172 |
continue
|
| 173 |
|
|
|
|
| 212 |
|
| 213 |
seed = random.randint(0, 100000) if seed is not None else None
|
| 214 |
|
| 215 |
+
if gs_model is None:
|
| 216 |
+
logger.error(f"Exceed image3d retry num, skip {image_path}.")
|
| 217 |
+
continue
|
| 218 |
+
|
| 219 |
# Render the video for generated 3D asset.
|
| 220 |
color_images = render_video(gs_model, r=1.85)["color"]
|
| 221 |
normal_images = render_video(mesh_model, r=1.85)["normal"]
|
embodied_gen/scripts/parallel_sim.py
CHANGED
|
@@ -15,7 +15,7 @@
|
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
|
| 18 |
-
from embodied_gen.utils.
|
| 19 |
|
| 20 |
monkey_patch_maniskill()
|
| 21 |
import json
|
|
|
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
|
| 18 |
+
from embodied_gen.utils.monkey_patch.maniskill import monkey_patch_maniskill
|
| 19 |
|
| 20 |
monkey_patch_maniskill()
|
| 21 |
import json
|
embodied_gen/scripts/room_gen/custom_solve.gin
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FloorPlanSolver.n_divide_trials = 25
|
| 2 |
+
FloorPlanSolver.iters_mult = 50
|
| 3 |
+
|
| 4 |
+
home_room_constraints.has_fewer_rooms = False
|
| 5 |
+
|
| 6 |
+
compose_indoors.place_cameras = False
|
| 7 |
+
|
| 8 |
+
solve_objects.addition_weight_scalar = 3.0
|
| 9 |
+
compose_indoors.solve_steps_large = 30
|
| 10 |
+
compose_indoors.solve_steps_medium = 20
|
| 11 |
+
compose_indoors.solve_steps_small = 5
|
| 12 |
+
|
| 13 |
+
compose_indoors.invisible_room_ceilings_enabled = False
|
| 14 |
+
compose_indoors.hide_other_rooms_enabled = True
|
| 15 |
+
|
| 16 |
+
compose_indoors.terrain_enabled = False
|
| 17 |
+
|
| 18 |
+
compose_indoors.pose_cameras_enabled = False
|
| 19 |
+
compose_indoors.animate_cameras_enabled = False
|
| 20 |
+
compose_indoors.overhead_cam_enabled = True
|
| 21 |
+
|
| 22 |
+
compose_indoors.nature_backdrop_enabled = False
|
| 23 |
+
|
| 24 |
+
compose_indoors.lights_off_chance = 0.0
|
| 25 |
+
|
| 26 |
+
compose_indoors.skirting_floor_enabled = False
|
| 27 |
+
compose_indoors.skirting_ceiling_enabled = False
|
| 28 |
+
BlueprintSolidifier.enable_open = False
|
| 29 |
+
|
embodied_gen/scripts/room_gen/export_scene.py
ADDED
|
@@ -0,0 +1,1516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
# Some code comes from: https://github.com/princeton-vl/infinigen/blob/main/infinigen/tools/export.py
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
import logging
|
| 20 |
+
import math
|
| 21 |
+
import os
|
| 22 |
+
import shutil
|
| 23 |
+
import subprocess
|
| 24 |
+
from collections import defaultdict
|
| 25 |
+
from pathlib import Path
|
| 26 |
+
from typing import Dict, List, Optional, Tuple
|
| 27 |
+
|
| 28 |
+
import bpy
|
| 29 |
+
import coacd
|
| 30 |
+
import gin
|
| 31 |
+
import numpy as np
|
| 32 |
+
import trimesh
|
| 33 |
+
from infinigen.core.util import blender as butil
|
| 34 |
+
|
| 35 |
+
logger = logging.getLogger(__name__)
|
| 36 |
+
|
| 37 |
+
FORMAT_CHOICES = ["fbx", "obj", "usdc", "usda", "stl", "ply"]
|
| 38 |
+
BAKE_TYPES = {
|
| 39 |
+
"DIFFUSE": "Base Color",
|
| 40 |
+
"ROUGHNESS": "Roughness",
|
| 41 |
+
"NORMAL": "Normal",
|
| 42 |
+
} # "EMIT":"Emission Color" # "GLOSSY": "Specular IOR Level", "TRANSMISSION":"Transmission Weight" don't export
|
| 43 |
+
SPECIAL_BAKE = {"METAL": "Metallic", "TRANSMISSION": "Transmission Weight"}
|
| 44 |
+
ALL_BAKE = BAKE_TYPES | SPECIAL_BAKE
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def apply_all_modifiers(obj):
    """Apply every modifier on ``obj``, skipping ones Blender rejects.

    Iterates over a snapshot of the modifier stack: ``modifier_apply``
    removes each applied modifier from ``obj.modifiers``, so iterating the
    live collection would mutate it mid-loop and skip every other entry.

    Args:
        obj: Blender object whose modifier stack should be applied.
    """
    for mod in list(obj.modifiers):
        if mod is None:
            continue
        try:
            obj.select_set(True)
            bpy.context.view_layer.objects.active = obj
            bpy.ops.object.modifier_apply(modifier=mod.name)
            logger.info(f"Applied modifier {mod} on {obj}")
            obj.select_set(False)
        except RuntimeError:
            logger.info(f"Can't apply {mod} on {obj}")
            obj.select_set(False)
    return
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def realizeInstances(obj):
    """Insert a Realize Instances node before the Group Output of every
    geometry-nodes modifier on ``obj``, converting instanced geometry into
    real mesh data."""
    for mod in obj.modifiers:
        if mod is None or mod.type != "NODES":
            continue
        geo_group = mod.node_group
        outputNode = geo_group.nodes["Group Output"]

        logger.info(f"Realizing instances on {mod}")
        # NOTE(review): assumes the first output socket is linked — an
        # unconnected Group Output would raise IndexError here; confirm.
        link = outputNode.inputs[0].links[0]
        from_socket = link.from_socket
        # Splice a RealizeInstances node between the producing socket and
        # the group output.
        geo_group.links.remove(link)
        realizeNode = geo_group.nodes.new(type="GeometryNodeRealizeInstances")
        geo_group.links.new(realizeNode.inputs[0], from_socket)
        geo_group.links.new(outputNode.inputs[0], realizeNode.outputs[0])
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def remove_shade_smooth(obj):
    """Bypass any "Set Shade Smooth" node in ``obj``'s geometry-nodes
    modifiers by rewiring its upstream geometry directly to Group Output."""
    for mod in obj.modifiers:
        if mod is None or mod.type != "NODES":
            continue
        geo_group = mod.node_group
        outputNode = geo_group.nodes["Group Output"]
        if geo_group.nodes.get("Set Shade Smooth"):
            logger.info("Removing shade smooth on " + obj.name)
            smooth_node = geo_group.nodes["Set Shade Smooth"]
        else:
            continue

        # Reconnect the smooth node's incoming geometry straight to the
        # output; the (now dangling) node itself is left in the tree.
        # NOTE(review): assumes input 0 of the smooth node is linked;
        # IndexError otherwise — confirm.
        link = smooth_node.inputs[0].links[0]
        from_socket = link.from_socket
        geo_group.links.remove(link)
        geo_group.links.new(outputNode.inputs[0], from_socket)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def check_material_geonode(node_tree):
    """Return True when a "Set Material" node exists anywhere in
    ``node_tree``, descending into nested node groups recursively."""
    if node_tree.nodes.get("Set Material"):
        logger.info("Found set material!")
        return True

    return any(
        node.type == "GROUP" and check_material_geonode(node.node_tree)
        for node in node_tree.nodes
    )
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def handle_geo_modifiers(obj, export_usd):
    """Prepare geometry-nodes output for export.

    Adds a placeholder node-based material when the object has a
    geometry-nodes modifier but no materials, and realizes instances when
    the target format is not USD.

    Args:
        obj: Blender object to prepare.
        export_usd: True when exporting to USD (skips instance realization).
    """
    has_geo_nodes = False
    for mod in obj.modifiers:
        if mod is None or mod.type != "NODES":
            continue
        has_geo_nodes = True

    if has_geo_nodes and not obj.data.materials:
        # NOTE(review): `mod` leaks out of the loop above, so this material
        # is named after the *last* modifier on the stack, which is not
        # necessarily the NODES one — confirm the naming is intended.
        mat = bpy.data.materials.new(name=f"{mod.name} shader")
        obj.data.materials.append(mat)
        mat.use_nodes = True
        # Drop the default BSDF so the slot is a bare node material.
        mat.node_tree.nodes.remove(mat.node_tree.nodes["Principled BSDF"])

    if not export_usd:
        realizeInstances(obj)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def split_glass_mats():
    """Separate glass/bulb materials into their own objects.

    For every renderable object (excluding a few factory types), when a
    material named like ``shader_glass`` or ``shader_lamp_bulb`` shares the
    object with at least one other material, split the mesh by material and
    rename the resulting glass fragments with a ``_SPLIT_GLASS`` suffix.
    """
    split_objs = []
    for obj in bpy.data.objects:
        if obj.hide_render or obj.hide_viewport:
            continue
        # These factory objects keep their glass merged with the rest.
        if any(
            exclude in obj.name
            for exclude in [
                "BowlFactory",
                "CupFactory",
                "OvenFactory",
                "BottleFactory",
            ]
        ):
            continue
        for slot in obj.material_slots:
            mat = slot.material
            if mat is None:
                continue
            if (
                "shader_glass" in mat.name or "shader_lamp_bulb" in mat.name
            ) and len(obj.material_slots) >= 2:
                logger.info(f"Splitting {obj}")
                obj.select_set(True)
                bpy.context.view_layer.objects.active = obj
                bpy.ops.object.mode_set(mode="EDIT")
                bpy.ops.mesh.separate(type="MATERIAL")
                bpy.ops.object.mode_set(mode="OBJECT")
                obj.select_set(False)
                split_objs.append(obj.name)
                break

    # Find all fragments produced by the separation (matched by name
    # prefix) and tag the ones whose first material is glass/bulb.
    matches = [
        obj
        for split_obj in split_objs
        for obj in bpy.data.objects
        if split_obj in obj.name
    ]
    for match in matches:
        if len(match.material_slots) == 0 or match.material_slots[0] is None:
            continue
        mat = match.material_slots[0].material
        if mat is None:
            continue
        if "shader_glass" in mat.name or "shader_lamp_bulb" in mat.name:
            match.name = f"{match.name}_SPLIT_GLASS"
|
| 173 |
+
|
| 174 |
+
def clean_names(obj=None):
    """Replace spaces and dots with underscores in object, UV-map, and
    material names (dots in UV-map names break USD export).

    Args:
        obj: When given, sanitize only that object's names (materials are
            still renamed file-wide); when None, sanitize every object.
    """
    if obj is not None:
        obj.name = (obj.name).replace(" ", "_")
        obj.name = (obj.name).replace(".", "_")

        if obj.type == "MESH":
            for uv_map in obj.data.uv_layers:
                uv_map.name = uv_map.name.replace(".", "_")

        # NOTE(review): this renames every material in the file and then
        # the object's own slots again — the slot pass is redundant but
        # harmless; confirm global renaming is intended here.
        for mat in bpy.data.materials:
            if mat is None:
                continue
            mat.name = (mat.name).replace(" ", "_")
            mat.name = (mat.name).replace(".", "_")

        for slot in obj.material_slots:
            mat = slot.material
            if mat is None:
                continue
            mat.name = (mat.name).replace(" ", "_")
            mat.name = (mat.name).replace(".", "_")
        return

    for obj in bpy.data.objects:
        obj.name = (obj.name).replace(" ", "_")
        obj.name = (obj.name).replace(".", "_")

        if obj.type == "MESH":
            for uv_map in obj.data.uv_layers:
                uv_map.name = uv_map.name.replace(
                    ".", "_"
                )  # if uv has "." in name the node will export wrong in USD

    for mat in bpy.data.materials:
        if mat is None:
            continue
        mat.name = (mat.name).replace(" ", "_")
        mat.name = (mat.name).replace(".", "_")
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def remove_obj_parents(obj=None):
    """Clear the parent of ``obj`` (or of every object when ``obj`` is
    None) while keeping each object's world-space position unchanged."""

    def _unparent(target):
        # Capture world position first, then restore it after clearing the
        # parent so the object does not move.
        world_pos = target.matrix_world.to_translation()
        target.parent = None
        target.matrix_world.translation = world_pos

    if obj is not None:
        _unparent(obj)
        return

    for scene_obj in bpy.data.objects:
        _unparent(scene_obj)
|
| 226 |
+
|
| 227 |
+
def delete_objects():
    """Remove non-export scene content: the "placeholders" collection
    (with all its objects and nested collections), plus the "Grid",
    "atmosphere", and "KoleClouds" objects when present."""
    logger.info("Deleting placeholders collection")
    collection_name = "placeholders"
    collection = bpy.data.collections.get(collection_name)

    if collection:
        # Unlink the collection from every scene before deleting datablocks.
        for scene in bpy.data.scenes:
            if collection.name in scene.collection.children:
                scene.collection.children.unlink(collection)

        for obj in collection.objects:
            bpy.data.objects.remove(obj, do_unlink=True)

        def delete_child_collections(parent_collection):
            # Depth-first removal of nested collections.
            for child_collection in parent_collection.children:
                delete_child_collections(child_collection)
                bpy.data.collections.remove(child_collection)

        delete_child_collections(collection)
        bpy.data.collections.remove(collection)

    if bpy.data.objects.get("Grid"):
        bpy.data.objects.remove(bpy.data.objects["Grid"], do_unlink=True)

    if bpy.data.objects.get("atmosphere"):
        bpy.data.objects.remove(bpy.data.objects["atmosphere"], do_unlink=True)

    if bpy.data.objects.get("KoleClouds"):
        bpy.data.objects.remove(bpy.data.objects["KoleClouds"], do_unlink=True)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def rename_all_meshes(obj=None):
    """Sync mesh-data names to their owning object's name.

    Only single-user data blocks are renamed, so meshes shared between
    objects are left untouched. Operates on just *obj* when given,
    otherwise on every object in the file.
    """
    targets = [obj] if obj is not None else bpy.data.objects
    for target in targets:
        data = target.data
        if data and data.users == 1:
            data.name = target.name
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def update_visibility():
    """Prepare visibility state for per-object baking/export.

    Enables render/viewport visibility for all collections, hides all
    objects (viewport and render), and returns the previous
    ``hide_render`` state so callers can restore it afterwards.

    Returns:
        (collection_view, obj_view): dicts mapping each collection/object
        to its original ``hide_render`` flag.
    """
    # Requires an OUTLINER area in the current screen; raises StopIteration
    # otherwise.
    outliner_area = next(
        a for a in bpy.context.screen.areas if a.type == "OUTLINER"
    )
    space = outliner_area.spaces[0]
    space.show_restrict_column_viewport = (
        True  # Global visibility (Monitor icon)
    )
    collection_view = {}
    obj_view = {}
    for collection in bpy.data.collections:
        collection_view[collection] = collection.hide_render
        collection.hide_viewport = False  # reenables viewports for all
        collection.hide_render = False  # enables renders for all collections

    # disables viewports and renders for all objs
    for obj in bpy.data.objects:
        obj_view[obj] = obj.hide_render
        obj.hide_viewport = True
        obj.hide_render = True
        obj.hide_set(0)  # keep the object visible in the outliner/eye toggle

    return collection_view, obj_view
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def uv_unwrap(obj):
    """Create an "ExportUV" layer on *obj* and smart-project unwrap it.

    Returns True on success, False if Blender's smart_project operator
    raised a RuntimeError (in which case the mesh should be skipped by
    the caller). Leaves the object deselected and in OBJECT mode.
    """
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj

    obj.data.uv_layers.new(name="ExportUV")
    bpy.context.object.data.uv_layers["ExportUV"].active = True

    logger.info("UV Unwrapping")
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action="SELECT")
    try:
        bpy.ops.uv.smart_project(angle_limit=0.7)
    except RuntimeError:
        # Unwrap can fail on degenerate geometry; restore state and signal
        # the caller to skip this mesh.
        logger.info("UV Unwrap failed, skipping mesh")
        bpy.ops.object.mode_set(mode="OBJECT")
        obj.select_set(False)
        return False
    bpy.ops.object.mode_set(mode="OBJECT")
    obj.select_set(False)
    return True
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def bakeVertexColors(obj):
    """Bake the diffuse color of *obj* into a per-corner vertex color layer.

    Creates a new BYTE_COLOR attribute named "VertColor", makes it the
    active color target, and runs a DIFFUSE/COLOR bake into vertex colors.
    Leaves the object deselected afterwards.
    """
    logger.info(f"Baking vertex color on {obj}")
    bpy.ops.object.select_all(action="DESELECT")
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj
    vertColor = bpy.context.object.data.color_attributes.new(
        name="VertColor", domain="CORNER", type="BYTE_COLOR"
    )
    bpy.context.object.data.attributes.active_color = vertColor
    bpy.ops.object.bake(
        type="DIFFUSE", pass_filter={"COLOR"}, target="VERTEX_COLORS"
    )
    obj.select_set(False)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def apply_baked_tex(obj, paramDict=None):
    """Rebuild *obj*'s materials to use the previously baked image textures.

    For every material slot: prunes the node graph down to the baked image
    nodes plus a single Principled BSDF (creating one when the original
    graph had none or was non-trivial), wires each baked ``<TYPE>_node``
    image into the matching BSDF input (NORMAL goes through a Normal Map
    node), and restores the Metallic/Sheen/Coat values previously cleared
    by ``remove_params`` via *paramDict*.

    Args:
        obj: Mesh object whose materials are rewired in place.
        paramDict: Optional mapping ``{material_name: {"Metallic": ...,
            "Sheen Weight": ..., "Coat Weight": ...}}``. Defaults to an
            empty dict. (Was a mutable default argument ``{}``; changed to
            the ``None`` sentinel idiom — behavior is unchanged since the
            dict was only read.)
    """
    if paramDict is None:
        paramDict = {}

    bpy.context.view_layer.objects.active = obj
    bpy.context.object.data.uv_layers["ExportUV"].active_render = True
    # Drop every UV layer except the bake layer; iterate reversed so
    # removal does not skip entries.
    for uv_layer in reversed(obj.data.uv_layers):
        if "ExportUV" not in uv_layer.name:
            logger.info(f"Removed extraneous UV Layer {uv_layer}")
            obj.data.uv_layers.remove(uv_layer)

    for slot in obj.material_slots:
        mat = slot.material
        if mat is None:
            continue
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        logger.info("Reapplying baked texs on " + mat.name)

        # delete all nodes except baked nodes and bsdf
        excludedNodes = [bake_type + "_node" for bake_type in ALL_BAKE]
        excludedNodes.extend(["Material Output", "Principled BSDF"])
        # Snapshot with list() so removing nodes does not perturb iteration.
        for n in list(nodes):
            if n.name not in excludedNodes:
                nodes.remove(
                    n
                )  # deletes an arbitrary principled BSDF in the case of a mix, which is handled below

        output = nodes["Material Output"]

        # stick baked texture in material
        if nodes.get("Principled BSDF") is None:  # no bsdf
            logger.info("No BSDF, creating new one")
            principled_bsdf_node = nodes.new("ShaderNodeBsdfPrincipled")
        elif (
            len(output.inputs[0].links) != 0
            and output.inputs[0].links[0].from_node.bl_idname
            == "ShaderNodeBsdfPrincipled"
        ):  # trivial bsdf graph
            logger.info("Trivial shader graph, using old BSDF")
            principled_bsdf_node = nodes["Principled BSDF"]
        else:
            logger.info("Non-trivial shader graph, creating new BSDF")
            nodes.remove(
                nodes["Principled BSDF"]
            )  # shader graph was a mix of bsdfs
            principled_bsdf_node = nodes.new("ShaderNodeBsdfPrincipled")

        links = mat.node_tree.links

        # create the new shader node links
        links.new(output.inputs[0], principled_bsdf_node.outputs[0])
        for bake_type in ALL_BAKE:
            if not nodes.get(bake_type + "_node"):
                continue
            tex_node = nodes[bake_type + "_node"]
            if bake_type == "NORMAL":
                # Normal maps must pass through a Normal Map node rather
                # than plugging the image straight into the BSDF.
                normal_node = nodes.new("ShaderNodeNormalMap")
                links.new(normal_node.inputs["Color"], tex_node.outputs[0])
                links.new(
                    principled_bsdf_node.inputs[ALL_BAKE[bake_type]],
                    normal_node.outputs[0],
                )
                continue
            links.new(
                principled_bsdf_node.inputs[ALL_BAKE[bake_type]],
                tex_node.outputs[0],
            )

        # bring back cleared param values
        if mat.name in paramDict:
            principled_bsdf_node.inputs["Metallic"].default_value = paramDict[
                mat.name
            ]["Metallic"]
            principled_bsdf_node.inputs["Sheen Weight"].default_value = (
                paramDict[mat.name]["Sheen Weight"]
            )
            principled_bsdf_node.inputs["Coat Weight"].default_value = (
                paramDict[mat.name]["Coat Weight"]
            )
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def create_glass_shader(node_tree, export_usd):
    """Replace a glass shader graph with an export-friendly Principled BSDF.

    Copies color/roughness/IOR from an existing Glass BSDF node when
    present (otherwise uses roughness 0), sets Transmission Weight to 1,
    and — for USD exports — sets Alpha to 0 so viewers treat the surface
    as transparent. The new BSDF is linked directly to Material Output.
    """
    nodes = node_tree.nodes
    if nodes.get("Glass BSDF"):
        # Glass BSDF input order: 0=Color, 1=Roughness, 2=IOR.
        color = nodes["Glass BSDF"].inputs[0].default_value
        roughness = nodes["Glass BSDF"].inputs[1].default_value
        ior = nodes["Glass BSDF"].inputs[2].default_value

    if nodes.get("Principled BSDF"):
        nodes.remove(nodes["Principled BSDF"])

    principled_bsdf_node = nodes.new("ShaderNodeBsdfPrincipled")

    if nodes.get("Glass BSDF"):
        principled_bsdf_node.inputs["Base Color"].default_value = color
        principled_bsdf_node.inputs["Roughness"].default_value = roughness
        principled_bsdf_node.inputs["IOR"].default_value = ior
    else:
        principled_bsdf_node.inputs["Roughness"].default_value = 0

    principled_bsdf_node.inputs["Transmission Weight"].default_value = 1
    if export_usd:
        # USD viewers generally ignore transmission; force alpha transparency.
        principled_bsdf_node.inputs["Alpha"].default_value = 0
    node_tree.links.new(
        principled_bsdf_node.outputs[0], nodes["Material Output"].inputs[0]
    )
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def process_glass_materials(obj, export_usd):
    """Convert glass-like materials on *obj* into exportable BSDFs.

    A material is treated as glass when it contains a "Glass BSDF" node,
    or when its name contains "glass" or "shader_lamp_bulb". Conversion
    is delegated to ``create_glass_shader``; a warning is logged when the
    glass node is not wired directly to Material Output (the converted
    result will only approximate the original look).
    """
    for slot in obj.material_slots:
        mat = slot.material
        if mat is None or not mat.use_nodes:
            continue
        nodes = mat.node_tree.nodes
        outputNode = nodes["Material Output"]
        if nodes.get("Glass BSDF"):
            if (
                outputNode.inputs[0].links[0].from_node.bl_idname
                == "ShaderNodeBsdfGlass"
            ):
                logger.info(f"Creating glass material on {obj.name}")
            else:
                logger.info(
                    f"Non-trivial glass material on {obj.name}, material export will be inaccurate"
                )
            create_glass_shader(mat.node_tree, export_usd)
        elif "glass" in mat.name or "shader_lamp_bulb" in mat.name:
            # Name-based fallback for materials without an explicit Glass BSDF.
            logger.info(f"Creating glass material on {obj.name}")
            create_glass_shader(mat.node_tree, export_usd)
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
def bake_pass(
    obj, dest: Path, img_size, bake_type, export_usd, export_name=None
):
    """Bake one texture pass of *obj* into a new PNG under *dest*.

    Creates one shared ``img_size`` x ``img_size`` image and attaches a
    ``<bake_type>_node`` image node to every material, then runs a single
    bake (SPECIAL_BAKE types are baked via EMIT). Materials without a
    surface output are excluded and their temp image nodes removed after
    the bake. Empty material slots are removed outright.

    Args:
        obj: Mesh to bake (must be the active bake target in the scene).
        dest: Output directory for the PNG.
        img_size: Square texture resolution in pixels.
        bake_type: Key into ALL_BAKE / SPECIAL_BAKE selecting the pass.
        export_usd: Unused here directly; kept for signature parity with
            the other bake helpers.
        export_name: Optional stem overriding the object-name-derived
            filename.
    """
    if export_name is None:
        img = bpy.data.images.new(
            f"{obj.name}_{bake_type}", img_size, img_size
        )
        # Sanitize the object name into a filesystem-safe stem.
        clean_name = (
            (obj.name).replace(" ", "_").replace(".", "_").replace("/", "_")
        )
        clean_name = (
            clean_name.replace("(", "_").replace(")", "").replace("-", "_")
        )
        file_path = dest / f"{clean_name}_{bake_type}.png"
    else:
        img = bpy.data.images.new(
            f"{export_name}_{bake_type}", img_size, img_size
        )
        file_path = dest / f"{export_name}_{bake_type}.png"
        # NOTE(review): `dest` is reassigned after `file_path` is built and
        # is not read again below — this line appears to have no effect;
        # confirm whether the PNG was meant to land in dest/"textures".
        dest = dest / "textures"

    bake_obj = False
    bake_exclude_mats = {}

    # materials are stored as stack so when removing traverse the reversed list
    for index, slot in reversed(list(enumerate(obj.material_slots))):
        mat = slot.material
        if mat is None:
            # Empty slot: remove it so the exporter does not emit a
            # material-less slot.
            bpy.context.object.active_material_index = index
            bpy.ops.object.material_slot_remove()
            continue

        logger.info(mat.name)
        mat.use_nodes = True
        nodes = mat.node_tree.nodes

        output = nodes["Material Output"]

        # The active image node is the bake target for this material.
        img_node = nodes.new("ShaderNodeTexImage")
        img_node.name = f"{bake_type}_node"
        img_node.image = img
        img_node.select = True
        nodes.active = img_node
        img_node.select = True

        if len(output.inputs["Displacement"].links) != 0:
            bake_obj = True

        if len(output.inputs[0].links) == 0:
            logger.info(
                f"{mat.name} has no surface output, not using baked textures"
            )
            bake_exclude_mats[mat] = img_node
            continue

        # surface_node = output.inputs[0].links[0].from_node
        # if (
        #     bake_type in ALL_BAKE
        #     and surface_node.bl_idname == "ShaderNodeBsdfPrincipled"
        #     and len(surface_node.inputs[ALL_BAKE[bake_type]].links) == 0
        # ):  # trivial bsdf graph
        #     logger.info(
        #         f"{mat.name} has no procedural input for {bake_type}, not using baked textures"
        #     )
        #     bake_exclude_mats[mat] = img_node
        #     continue

        bake_obj = True

    # SPECIAL_BAKE passes are rendered through the emission channel.
    if bake_type in SPECIAL_BAKE:
        internal_bake_type = "EMIT"
    else:
        internal_bake_type = bake_type

    if bake_obj:
        logger.info(f"Baking {bake_type} pass")
        bpy.ops.object.bake(
            type=internal_bake_type,
            pass_filter={"COLOR"},
            save_mode="EXTERNAL",
        )
        img.filepath_raw = str(file_path)
        img.save()
        logger.info(f"Saving to {file_path}")
    else:
        logger.info(
            f"No necessary materials to bake on {obj.name}, skipping bake"
        )

    # Remove the temporary image nodes from materials that were excluded.
    for mat, img_node in bake_exclude_mats.items():
        mat.node_tree.nodes.remove(img_node)
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
def bake_special_emit(
    obj, dest, img_size, export_usd, bake_type, export_name=None
):
    """Bake a scalar BSDF parameter (e.g. Metallic) as an emissive color map.

    For each material, locates a Principled BSDF (directly or inside a
    node group), temporarily disconnects it from the output and plugs in
    an RGB node holding the parameter's scalar value, bakes via
    ``bake_pass`` when at least one material has a non-zero value, then
    restores the original links. The node-graph edits are tracked in
    ``links_added`` / ``links_removed`` so the scene is left unchanged.
    """
    # If at least one material has both a BSDF and non-zero bake type value, then bake
    should_bake = False

    # (Root node, From Socket, To Socket)
    links_removed = []
    links_added = []

    for slot in obj.material_slots:
        mat = slot.material
        if mat is None:
            logger.warn("No material on mesh, skipping...")
            continue
        if not mat.use_nodes:
            logger.warn("Material has no nodes, skipping...")
            continue

        nodes = mat.node_tree.nodes
        principled_bsdf_node = None
        root_node = None
        logger.info(f"{mat.name} has {len(nodes)} nodes: {nodes}")
        # Search node groups for a nested Principled BSDF; remember the
        # group node as the "root" owning the links we will edit.
        for node in nodes:
            if node.type != "GROUP":
                continue

            for subnode in node.node_tree.nodes:
                logger.info(
                    f"  [{subnode.type}] {subnode.name} {subnode.bl_idname}"
                )
                if subnode.type == "BSDF_PRINCIPLED":
                    logger.debug(f"  BSDF_PRINCIPLED: {subnode.inputs}")
                    principled_bsdf_node = subnode
                    root_node = node

        # A top-level Principled BSDF takes precedence over a nested one.
        if nodes.get("Principled BSDF"):
            principled_bsdf_node = nodes["Principled BSDF"]
            root_node = mat
        elif not principled_bsdf_node:
            logger.warn("No Principled BSDF, skipping...")
            continue
        elif ALL_BAKE[bake_type] not in principled_bsdf_node.inputs:
            logger.warn(f"No {bake_type} input, skipping...")
            continue

        # Here, we"ve found the proper BSDF and bake type input. Set up the scene graph
        # for baking.
        outputSoc = principled_bsdf_node.outputs[0].links[0].to_socket

        # Remove the BSDF link to Output first
        link = principled_bsdf_node.outputs[0].links[0]
        from_socket, to_socket = link.from_socket, link.to_socket
        logger.debug(f"Removing link: {from_socket.name} => {to_socket.name}")
        root_node.node_tree.links.remove(link)
        links_removed.append((root_node, from_socket, to_socket))

        # Get bake_type value
        bake_input = principled_bsdf_node.inputs[ALL_BAKE[bake_type]]
        bake_val = bake_input.default_value
        logger.info(f"{bake_type} value: {bake_val}")

        if bake_val > 0:
            should_bake = True

        # Make a color input matching the metallic value
        col = root_node.node_tree.nodes.new("ShaderNodeRGB")
        col.outputs[0].default_value = (bake_val, bake_val, bake_val, 1.0)

        # Link the color to output
        new_link = root_node.node_tree.links.new(col.outputs[0], outputSoc)
        links_added.append((root_node, col.outputs[0], outputSoc))
        logger.debug(
            f"Linking {col.outputs[0].name} to {outputSoc.name}({outputSoc.bl_idname}): {new_link}"
        )

    # After setting up all materials, bake if applicable
    if should_bake:
        bake_pass(obj, dest, img_size, bake_type, export_usd, export_name)

    # After baking, undo the temporary changes to the scene graph
    for n, from_soc, to_soc in links_added:
        logger.debug(
            f"Removing added link:\t{n.name}: {from_soc.name} => {to_soc.name}"
        )
        for l in n.node_tree.links:
            if l.from_socket == from_soc and l.to_socket == to_soc:
                n.node_tree.links.remove(l)
                logger.debug(
                    f"Removed link:\t{n.name}: {from_soc.name} => {to_soc.name}"
                )

    for n, from_soc, to_soc in links_removed:
        logger.debug(
            f"Adding back link:\t{n.name}: {from_soc.name} => {to_soc.name}"
        )
        n.node_tree.links.new(from_soc, to_soc)
|
| 650 |
+
|
| 651 |
+
|
| 652 |
+
def remove_params(mat, node_tree):
    """Zero out Metallic/Sheen/Coat on *mat*'s Principled BSDF before baking.

    These parameters would contaminate the baked color passes, so they
    are cleared and their original values returned for later restoration
    by ``apply_baked_tex``. Recurses into node groups when the top level
    has no trivially-wired Principled BSDF.

    Returns:
        dict: ``{mat.name: {"Metallic": ..., "Sheen Weight": ...,
        "Coat Weight": ...}}`` when values were cleared, else an empty dict.

    Raises:
        ValueError: if the tree has neither a Material Output nor a
        Group Output node.
    """
    nodes = node_tree.nodes
    paramDict = {}
    if nodes.get("Material Output"):
        output = nodes["Material Output"]
    elif nodes.get("Group Output"):
        # Inside a node group the terminal node is "Group Output".
        output = nodes["Group Output"]
    else:
        raise ValueError("Could not find material output node")

    if (
        nodes.get("Principled BSDF")
        and output.inputs[0].links[0].from_node.bl_idname
        == "ShaderNodeBsdfPrincipled"
    ):
        principled_bsdf_node = nodes["Principled BSDF"]
        metal = principled_bsdf_node.inputs[
            "Metallic"
        ].default_value  # store metallic value and set to 0
        sheen = principled_bsdf_node.inputs["Sheen Weight"].default_value
        clearcoat = principled_bsdf_node.inputs["Coat Weight"].default_value
        paramDict[mat.name] = {
            "Metallic": metal,
            "Sheen Weight": sheen,
            "Coat Weight": clearcoat,
        }
        principled_bsdf_node.inputs["Metallic"].default_value = 0
        principled_bsdf_node.inputs["Sheen Weight"].default_value = 0
        principled_bsdf_node.inputs["Coat Weight"].default_value = 0
        return paramDict

    # No trivially-wired BSDF at this level: recurse into node groups and
    # return the first non-empty result.
    for node in nodes:
        if node.type == "GROUP":
            paramDict = remove_params(mat, node.node_tree)
            if len(paramDict) != 0:
                return paramDict

    return paramDict
|
| 690 |
+
|
| 691 |
+
|
| 692 |
+
def process_interfering_params(obj):
    """Clear bake-interfering BSDF params on *obj*'s materials.

    Calls ``remove_params`` for each usable material slot (non-None
    material with nodes enabled) and returns the resulting parameter
    dict so the values can be restored after baking.

    Fix: ``paramDict`` is now initialized before the loop — previously an
    object with no usable materials raised ``UnboundLocalError`` at the
    final ``return``. Note that, as before, only the dict from the *last*
    processed material is returned.
    """
    paramDict = {}
    for slot in obj.material_slots:
        mat = slot.material
        if mat is None or not mat.use_nodes:
            continue
        paramDict = remove_params(mat, mat.node_tree)
    return paramDict
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
def skipBake(obj):
    """Return True when *obj* has nothing worth baking.

    An object is skipped when it has no materials or its mesh data
    contains no vertices; the reason is logged in either case.
    """
    if not obj.data.materials:
        logger.info("No material on mesh, skipping...")
        return True

    if not len(obj.data.vertices):
        logger.info("Mesh has no vertices, skipping ...")
        return True

    return False
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def triangulate_mesh(obj: bpy.types.Object):
    """Convert all quads/ngons of a single mesh object to triangles.

    Non-MESH objects are ignored. Temporarily un-hides the object in the
    viewport (required for edit-mode operators) and restores the previous
    viewport visibility afterwards.
    """
    logger.debug("Triangulating Mesh")
    if obj.type == "MESH":
        view_state = obj.hide_viewport
        obj.hide_viewport = False
        bpy.context.view_layer.objects.active = obj
        obj.select_set(True)
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.select_all(action="SELECT")
        logger.debug(f"Triangulating {obj}")
        bpy.ops.mesh.quads_convert_to_tris()
        bpy.ops.object.mode_set(mode="OBJECT")
        obj.select_set(False)
        obj.hide_viewport = view_state
|
| 727 |
+
|
| 728 |
+
|
| 729 |
+
def triangulate_meshes():
    """Triangulate every mesh object in the current scene.

    Delegates the per-object work to ``triangulate_mesh`` — the body was
    previously a byte-for-byte copy of that function's loop, so the
    edit-mode triangulation sequence now lives in a single place.
    """
    logger.debug("Triangulating Meshes")
    for obj in bpy.context.scene.objects:
        if obj.type == "MESH":
            triangulate_mesh(obj)
|
| 744 |
+
|
| 745 |
+
|
| 746 |
+
def adjust_wattages():
    """Rescale every point light's energy for the export target's units.

    Applies the conversion below to each POINT light that exposes both
    ``energy`` and ``shadow_soft_size`` (the light radius).
    """
    logger.info("Adjusting light wattage")
    for obj in bpy.context.scene.objects:
        if obj.type == "LIGHT" and obj.data.type == "POINT":
            light = obj.data
            if hasattr(light, "energy") and hasattr(light, "shadow_soft_size"):
                X = light.energy
                r = light.shadow_soft_size
                # candelas * 1000 / (4 * math.pi * r**2). additionally units come out of blender at 1/100 scale
                new_wattage = (
                    (X * 20 / (4 * math.pi))
                    * 1000
                    / (4 * math.pi * r**2)
                    * 100
                )
                light.energy = new_wattage
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
def set_center_of_mass():
    """Move each renderable object's origin to its geometry median.

    Skips objects hidden from render. Temporarily un-hides each object in
    the viewport (the origin_set operator requires it) and restores the
    previous viewport visibility afterwards.
    """
    logger.info("Resetting center of mass of objects")
    for obj in bpy.context.scene.objects:
        if not obj.hide_render:
            view_state = obj.hide_viewport
            obj.hide_viewport = False
            obj.select_set(True)
            bpy.context.view_layer.objects.active = obj
            bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY", center="MEDIAN")
            obj.select_set(False)
            obj.hide_viewport = view_state
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def duplicate_node_groups(node_tree, group_map=None):
    """Recursively give every GROUP node in *node_tree* its own tree copy.

    Identical source groups are mapped to the same copy (tracked in
    *group_map*), so sharing within the duplicated hierarchy is
    preserved. Copies are named ``<original>_copy``.

    Returns:
        dict: mapping from each original group tree to its copy.
    """
    group_map = {} if group_map is None else group_map

    for node in node_tree.nodes:
        if node.type != "GROUP":
            continue

        source = node.node_tree
        replica = group_map.get(source)
        if replica is None:
            replica = source.copy()
            replica.name = f"{source.name}_copy"
            group_map[source] = replica
            # Groups may nest arbitrarily deep: duplicate their children too.
            duplicate_node_groups(replica, group_map)

        node.node_tree = replica

    return group_map
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
def deep_copy_material(original_material, new_name_suffix="_deepcopy"):
    """Return a copy of *original_material* with private node groups.

    A plain ``.copy()`` still shares node-group trees with the original;
    this also duplicates those via ``duplicate_node_groups`` so edits to
    the copy cannot leak back. The copy is renamed with *new_name_suffix*.
    """
    duplicate = original_material.copy()
    duplicate.name = original_material.name + new_name_suffix
    if duplicate.use_nodes and duplicate.node_tree:
        duplicate_node_groups(duplicate.node_tree)
    return duplicate
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
def bake_object(obj, dest, img_size, export_usd, export_name=None):
    """Run the full bake pipeline for one object.

    Steps: UV-unwrap (abort if it fails), deep-copy materials (so shared
    materials are not mutated), convert glass shaders, bake the
    SPECIAL_BAKE scalar passes, clear interfering BSDF params, bake the
    standard BAKE_TYPES passes, then rewire materials onto the baked
    textures and restore the cleared params.
    """
    if not uv_unwrap(obj):
        return

    bpy.ops.object.select_all(action="DESELECT")

    with butil.SelectObjects(obj):
        for slot in obj.material_slots:
            mat = slot.material
            if mat is not None:
                slot.material = deep_copy_material(
                    mat
                )  # we duplicate in the case of distinct meshes sharing materials

        process_glass_materials(obj, export_usd)

        for bake_type in SPECIAL_BAKE:
            bake_special_emit(
                obj, dest, img_size, export_usd, bake_type, export_name
            )

        # bake_normals(obj, dest, img_size, export_usd)
        paramDict = process_interfering_params(obj)
        for bake_type in BAKE_TYPES:
            bake_pass(obj, dest, img_size, bake_type, export_usd, export_name)

        apply_baked_tex(obj, paramDict)
|
| 833 |
+
|
| 834 |
+
|
| 835 |
+
def bake_scene(folderPath: Path, image_res, vertex_colors, export_usd):
    """Bake textures (or vertex colors) for every mesh in the file.

    Each mesh in the active view layer is made temporarily renderable,
    baked via ``bakeVertexColors`` or ``bake_object``, then hidden again.
    """
    for obj in bpy.data.objects:
        logger.info("---------------------------")
        logger.info(obj.name)

        if obj.type != "MESH" or obj not in list(
            bpy.context.view_layer.objects
        ):
            logger.info("Not mesh, skipping ...")
            continue

        if skipBake(obj):
            continue

        # NOTE(review): `format` here is the *builtin* function, not a
        # local/parameter, so this comparison is always False and the
        # branch is dead. It was presumably meant to test an export-format
        # argument (cf. `run_blender_export`) — confirm and fix upstream.
        if format == "stl":
            continue

        obj.hide_render = False
        obj.hide_viewport = False

        if vertex_colors:
            bakeVertexColors(obj)
        else:
            bake_object(obj, folderPath, image_res, export_usd)

        obj.hide_render = True
        obj.hide_viewport = True
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
def run_blender_export(
    exportPath: Path, format: str, vertex_colors: bool, individual_export: bool
):
    """Invoke the Blender exporter matching *format*.

    Supported formats: "obj", "fbx", "stl", "ply", "usda"/"usdc". When
    *vertex_colors* is set, color attributes are exported instead of
    textures (obj/fbx only differ here). *individual_export* restricts
    the export to the current selection.
    """
    assert exportPath.parent.exists()
    # Blender export operators expect a string path, not a Path object.
    exportPath = str(exportPath)

    if format == "obj":
        if vertex_colors:
            bpy.ops.wm.obj_export(
                filepath=exportPath,
                export_colors=True,
                export_eval_mode="DAG_EVAL_RENDER",
                export_selected_objects=individual_export,
            )
        else:
            bpy.ops.wm.obj_export(
                filepath=exportPath,
                path_mode="COPY",
                export_materials=True,
                export_pbr_extensions=False,
                export_eval_mode="DAG_EVAL_RENDER",
                export_selected_objects=individual_export,
                export_triangulated_mesh=True,
                export_normals=False,
            )

    if format == "fbx":
        if vertex_colors:
            bpy.ops.export_scene.fbx(
                filepath=exportPath,
                colors_type="SRGB",
                use_selection=individual_export,
            )
        else:
            bpy.ops.export_scene.fbx(
                filepath=exportPath,
                path_mode="COPY",
                embed_textures=True,
                use_selection=individual_export,
            )

    if format == "stl":
        bpy.ops.export_mesh.stl(
            filepath=exportPath, use_selection=individual_export
        )

    if format == "ply":
        bpy.ops.wm.ply_export(
            filepath=exportPath, export_selected_objects=individual_export
        )

    if format in ["usda", "usdc"]:
        bpy.ops.wm.usd_export(
            filepath=exportPath,
            export_textures=True,
            # use_instancing=True,
            overwrite_textures=True,
            selected_objects_only=individual_export,
            root_prim_path="/World",
        )
|
| 924 |
+
|
| 925 |
+
|
| 926 |
+
def export_scene(
    input_blend: Path,
    output_folder: Path,
    pipeline_folder=None,
    task_uniqname=None,
    **kwargs,
):
    """Export the current scene into a per-blend subfolder of *output_folder*.

    Extra keyword arguments are forwarded to ``export_curr_scene``. When
    both *pipeline_folder* and *task_uniqname* are given, an empty
    ``FINISH_<task_uniqname>`` marker is touched under
    ``pipeline_folder/logs`` to signal pipeline completion.

    Returns:
        Path: the created export folder.
    """
    blend_stem = os.path.splitext(input_blend.name)[0]
    export_dir = output_folder / f"export_{blend_stem}"
    export_dir.mkdir(exist_ok=True, parents=True)

    export_curr_scene(export_dir, **kwargs)

    have_marker_info = (
        pipeline_folder is not None and task_uniqname is not None
    )
    if have_marker_info:
        marker = pipeline_folder / "logs" / f"FINISH_{task_uniqname}"
        marker.touch()

    return export_dir
|
| 941 |
+
|
| 942 |
+
|
| 943 |
+
# side effects: will remove parents of inputted obj and clean its name, hides viewport of all objects
|
| 944 |
+
def export_single_obj(
    obj: bpy.types.Object,
    output_folder: Path,
    format="usdc",
    image_res=1024,
    vertex_colors=False,
):
    """Bake and export a single mesh object into *output_folder*.

    Side effects: removes *obj*'s parents, renames/cleans its data-block
    names, and hides the viewport of all objects while working (previous
    render visibility is restored). The object is exported at the origin
    and moved back afterwards.

    Fixes vs. previous version:
    - The visibility-restore loop iterated ``for obj, status in
      obj_views.items()``, rebinding the ``obj`` parameter so every
      subsequent step (``clean_names``, location reset, export) acted on
      an arbitrary scene object instead of the requested one; the loop
      variable is now distinct.
    - Dropped a dead initial ``export_file`` assignment that was always
      overwritten before use.

    Returns:
        Path: the exported file ``<output_folder>/<obj.name>/<obj.name>.<format>``.

    Raises:
        ValueError: if *obj* is not a mesh in the view layer, or ends up
        hidden from render / without vertices before the final export.
    """
    export_usd = format in ["usda", "usdc"]

    export_folder = output_folder
    export_folder.mkdir(parents=True, exist_ok=True)

    logger.info(f"Exporting to directory {export_folder=}")

    remove_obj_parents(obj)
    rename_all_meshes(obj)

    collection_views, obj_views = update_visibility()

    # Baking requires Cycles; a single sample suffices for pure bakes.
    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.cycles.device = "GPU"
    bpy.context.scene.cycles.samples = 1  # choose render sample
    # Set the tile size
    bpy.context.scene.cycles.tile_x = image_res
    bpy.context.scene.cycles.tile_y = image_res

    if obj.type != "MESH" or obj not in list(bpy.context.view_layer.objects):
        raise ValueError("Object not mesh")

    if export_usd:
        apply_all_modifiers(obj)
    else:
        # Non-USD exporters cannot handle instanced geometry.
        realizeInstances(obj)
        apply_all_modifiers(obj)

    if not skipBake(obj) and format != "stl":
        if vertex_colors:
            bakeVertexColors(obj)
        else:
            obj.hide_render = False
            obj.hide_viewport = False
            bake_object(obj, export_folder / "textures", image_res, export_usd)
            obj.hide_render = True
            obj.hide_viewport = True

    # Restore the render visibility captured by update_visibility().
    for collection, status in collection_views.items():
        collection.hide_render = status

    # Use a distinct loop variable: the old code rebound `obj` here.
    for other_obj, status in obj_views.items():
        other_obj.hide_render = status

    clean_names(obj)

    # Export at the origin; restore the original location afterwards.
    old_loc = obj.location.copy()
    obj.location = (0, 0, 0)

    if (
        obj.type != "MESH"
        or obj.hide_render
        or len(obj.data.vertices) == 0
        or obj not in list(bpy.context.view_layer.objects)
    ):
        raise ValueError("Object is not mesh or hidden from render")

    export_subfolder = export_folder / obj.name
    export_subfolder.mkdir(exist_ok=True)
    export_file = export_subfolder / f"{obj.name}.{format}"

    logger.info(f"Exporting file to {export_file=}")
    obj.hide_viewport = False
    obj.select_set(True)
    run_blender_export(
        export_file, format, vertex_colors, individual_export=True
    )
    obj.select_set(False)
    obj.location = old_loc

    return export_file
|
| 1023 |
+
|
| 1024 |
+
|
| 1025 |
+
def export_sim_ready(
    obj: bpy.types.Object,
    output_folder: Path,
    image_res: int = 1024,
    translation: Tuple[float, float, float] = (0, 0, 0),
    name: Optional[str] = None,
    visual_only: bool = False,
    collision_only: bool = False,
    separate_asset_dirs: bool = True,
) -> Dict[str, List[Path]]:
    """Exports both the visual and collision assets for a geometry.

    The visual asset is a single baked, triangulated ``.obj``. Collision
    assets are produced by splitting the object into connected parts and
    running CoACD convex decomposition on each part.

    Args:
        obj: Blender mesh object to export. Must be a MESH in the current
            view layer.
        output_folder: Root folder for exported assets.
        image_res: Bake texture resolution (also used as Cycles tile size).
        translation: Offset applied to the object location before export
            and reverted afterwards.
        name: Override for the exported asset name; defaults to ``obj.name``.
        visual_only: If True, skip collision decomposition entirely.
        collision_only: If True, do not record the visual asset in the
            returned dict (the visual .obj is still written, since CoACD
            needs it).
        separate_asset_dirs: If True, write visual/collision assets into
            ``visual/`` and ``collision/`` subfolders.

    Returns:
        Mapping with keys ``"visual"`` and/or ``"collision"`` to the list
        of exported file paths.

    Raises:
        ValueError: If the object is not a mesh, is hidden from render,
            or has no vertices.
    """
    if not visual_only:
        assert (
            coacd is not None
        ), "coacd is required to export simulation assets."

    asset_exports = defaultdict(list)
    export_name = name if name is not None else obj.name

    if separate_asset_dirs:
        visual_export_folder = output_folder / "visual"
        collision_export_folder = output_folder / "collision"
    else:
        visual_export_folder = output_folder
        collision_export_folder = output_folder

    texture_export_folder = output_folder / "textures"

    visual_export_folder.mkdir(parents=True, exist_ok=True)
    collision_export_folder.mkdir(parents=True, exist_ok=True)

    logger.info(f"Exporting to directory {output_folder=}")

    # Remember current render visibility so it can be restored after baking.
    collection_views, obj_views = update_visibility()

    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.cycles.device = "GPU"
    bpy.context.scene.cycles.samples = 1  # choose render sample
    # Set the tile size
    bpy.context.scene.cycles.tile_x = image_res
    bpy.context.scene.cycles.tile_y = image_res

    if obj.type != "MESH" or obj not in list(bpy.context.view_layer.objects):
        raise ValueError("Object not mesh")

    # export the textures
    if not skipBake(obj):
        texture_export_folder.mkdir(parents=True, exist_ok=True)
        # Bake requires the object to be visible in render and viewport.
        obj.hide_render = False
        obj.hide_viewport = False
        bake_object(obj, texture_export_folder, image_res, False, export_name)
        obj.hide_render = True
        obj.hide_viewport = True

    # Restore previously recorded visibility state.
    for collection, status in collection_views.items():
        collection.hide_render = status

    for obj_tmp, status in obj_views.items():
        obj_tmp.hide_render = status

    # translating object
    old_loc = obj.location.copy()
    obj.location = (
        old_loc[0] + translation[0],
        old_loc[1] + translation[1],
        old_loc[2] + translation[2],
    )

    if (
        obj.type != "MESH"
        or obj.hide_render
        or len(obj.data.vertices) == 0
        or obj not in list(bpy.context.view_layer.objects)
    ):
        raise ValueError("Object is not mesh or hidden from render")

    # export the mesh assets
    visual_export_file = visual_export_folder / f"{export_name}.obj"

    logger.info(f"Exporting file to {visual_export_file=}")
    obj.hide_viewport = False
    obj.select_set(True)

    # export visual asset
    with butil.SelectObjects(obj, active=1):
        bpy.ops.wm.obj_export(
            filepath=str(visual_export_file),
            up_axis="Z",
            forward_axis="Y",
            export_selected_objects=True,
            export_triangulated_mesh=True,  # required for coacd to run properly
        )
    if not collision_only:
        asset_exports["visual"].append(visual_export_file)

    if visual_only:
        # Undo selection/translation side effects before early return.
        obj.select_set(False)
        obj.location = old_loc
        return asset_exports

    # Work on a deep clone so splitting does not destroy the original.
    clone = butil.deep_clone_obj(obj)
    parts = butil.split_object(clone)

    # Temporary per-part files, re-used for each part and deleted at the end.
    part_export_obj_file = visual_export_folder / f"{export_name}_part.obj"
    part_export_mtl_file = visual_export_folder / f"{export_name}_part.mtl"

    collision_count = 0
    for part in parts:
        with butil.SelectObjects(part, active=1):
            bpy.ops.wm.obj_export(
                filepath=str(part_export_obj_file),
                up_axis="Z",
                forward_axis="Y",
                export_selected_objects=True,
                export_triangulated_mesh=True,  # required for coacd to run properly
            )

        # export the collision meshes
        mesh_tri = trimesh.load(
            str(part_export_obj_file),
            merge_norm=True,
            merge_tex=True,
            force="mesh",
        )
        trimesh.repair.fix_inversion(mesh_tri)
        preprocess_mode = "off"
        if not mesh_tri.is_volume:
            # Diagnostic print of which volume precondition failed; enable
            # CoACD's own preprocessing to compensate.
            print(
                mesh_tri.is_watertight,
                mesh_tri.is_winding_consistent,
                np.isfinite(mesh_tri.center_mass).all(),
                mesh_tri.volume > 0.0,
            )
            preprocess_mode = "on"

        if len(mesh_tri.vertices) < 4:
            logger.warning(
                f"Mesh is not a volume. Only has {len(mesh_tri.vertices)} vertices."
            )
            # raise ValueError(f"Mesh is not a volume. Only has {len(mesh_tri.vertices)} vertices.")
        mesh = coacd.Mesh(mesh_tri.vertices, mesh_tri.faces)

        subparts = coacd.run_coacd(
            mesh=mesh,
            threshold=0.05,
            max_convex_hull=-1,
            preprocess_mode=preprocess_mode,
            mcts_max_depth=3,
        )
        # Collision assets are named with "col" in place of "vis".
        export_name = export_name.replace("vis", "col")
        for vs, fs in subparts:
            collision_export_file = (
                collision_export_folder
                / f"{export_name}_col{collision_count}.obj"
            )
            subpart_mesh = trimesh.Trimesh(vs, fs)

            # if subpart_mesh.is_empty:
            #     raise ValueError(
            #         "Warning: Collision mesh is completely outside the bounds of the original mesh."
            #     )
            subpart_mesh.export(str(collision_export_file))
            asset_exports["collision"].append(collision_export_file)
            collision_count += 1

    # delete temporary part files
    part_export_obj_file.unlink(missing_ok=True)
    part_export_mtl_file.unlink(missing_ok=True)

    # Restore original state and remove the temporary clone.
    obj.select_set(False)
    obj.location = old_loc
    butil.delete(clone)

    return asset_exports
|
| 1199 |
+
|
| 1200 |
+
|
| 1201 |
+
@gin.configurable
def export_curr_scene(
    output_folder: Path,
    format="usdc",
    image_res=1024,
    vertex_colors=False,
    individual_export=False,
    omniverse_export=False,
    pipeline_folder=None,
    task_uniqname=None,
    deconvex=False,
    center_scene=False,
    align_quat=(0.7071, 0, 0, 0.7071),  # xyzw
) -> Path:
    """Export the currently loaded Blender scene to a mesh/USD asset.

    When ``individual_export`` is True, every mesh object is exported as a
    separate file and a ``scene.urdf`` referencing all of them (as fixed
    joints on a common base link) is written; the URDF path is returned.
    Otherwise the whole scene is exported to a single file and that path
    is returned.

    Args:
        output_folder: Directory to write exported assets into.
        format: Target format, e.g. "obj" or "usdc"/"usda" (USD).
        image_res: Texture bake resolution and Cycles tile size.
        vertex_colors: Bake to vertex colors instead of texture images.
        individual_export: Export per-object files plus a scene URDF.
        omniverse_export: Apply Omniverse-specific fixes (glass material
            split, wattage adjustment, center-of-mass setup).
        pipeline_folder: Unused here; kept for gin-config compatibility.
        task_uniqname: Unused here; kept for gin-config compatibility.
        deconvex: Also produce convex-decomposed collision meshes for the
            URDF (individual export only).
        center_scene: Recenter objects so the scene bbox center is at the
            origin with the floor at z=0.
        align_quat: Quaternion (xyzw) used to compute per-object rpy
            entries in the URDF.

    Returns:
        Path to the exported scene file, or to ``scene.urdf`` when
        ``individual_export`` is enabled.
    """
    export_usd = format in ["usda", "usdc"]
    export_folder = output_folder
    export_folder.mkdir(exist_ok=True)
    export_file = export_folder / output_folder.with_suffix(f".{format}").name
    logger.info(f"Exporting to directory {export_folder=}")

    remove_obj_parents()
    delete_objects()
    triangulate_meshes()
    if omniverse_export:
        split_glass_mats()
    rename_all_meshes()

    # remove 0 polygon meshes
    for obj in bpy.data.objects:
        if obj.type == "MESH" and len(obj.data.polygons) == 0:
            logger.info(f"{obj.name} has no faces, removing...")
            bpy.data.objects.remove(obj, do_unlink=True)

    if center_scene:
        from mathutils import Vector

        # Collect world positions of renderable, non-origin mesh objects.
        positions = []
        view_objs = set(bpy.context.view_layer.objects)
        for obj in bpy.data.objects:
            if (
                obj.type == "MESH"
                and obj.data
                and obj.data.vertices
                and obj.data.polygons
                and not obj.hide_render
                and obj in view_objs
            ):
                pos = np.array(obj.matrix_world.translation)
                if not np.allclose(pos, 0):
                    positions.append(pos)

        if len(positions) > 0:
            positions = np.stack(positions)
            center = (positions.min(axis=0) + positions.max(axis=0)) * 0.5
            center[2] = positions[:, 2].min()  # Set floor to 0 among z-axis.
            for obj in bpy.data.objects:
                pos = np.array(obj.matrix_world.translation)
                if not np.allclose(pos, 0):
                    obj.location -= Vector(center)

    # USD export does not handle shade-smooth on scatter objects well.
    scatter_cols = []
    if export_usd:
        if bpy.data.collections.get("scatter"):
            scatter_cols.append(bpy.data.collections["scatter"])
        if bpy.data.collections.get("scatters"):
            scatter_cols.append(bpy.data.collections["scatters"])
        for col in scatter_cols:
            for obj in col.all_objects:
                remove_shade_smooth(obj)

    collection_views, obj_views = update_visibility()
    for obj in bpy.data.objects:
        if obj.type != "MESH" or obj not in list(
            bpy.context.view_layer.objects
        ):
            continue
        if export_usd:
            apply_all_modifiers(obj)
        else:
            # Non-USD formats need instances realized before modifiers.
            realizeInstances(obj)
            apply_all_modifiers(obj)

    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.cycles.device = "GPU"
    bpy.context.scene.cycles.samples = 1  # choose render sample
    # Set the tile size
    bpy.context.scene.cycles.tile_x = image_res
    bpy.context.scene.cycles.tile_y = image_res

    # iterate through all objects and bake them
    bake_scene(
        folderPath=export_folder / "textures",
        image_res=image_res,
        vertex_colors=vertex_colors,
        export_usd=export_usd,
    )

    # Restore the visibility recorded before baking.
    for collection, status in collection_views.items():
        collection.hide_render = status

    for obj, status in obj_views.items():
        obj.hide_render = status

    clean_names()

    for obj in bpy.data.objects:
        obj.hide_viewport = obj.hide_render

    if omniverse_export:
        adjust_wattages()
        set_center_of_mass()

    if individual_export:
        import math
        import xml.etree.ElementTree as ET
        from xml.dom import minidom

        import trimesh
        from scipy.spatial.transform import Rotation

        from embodied_gen.data.convex_decomposer import decompose_convex_mesh

        urdf_root = ET.Element("robot", name="multi_object_scene")
        ET.SubElement(urdf_root, "link", name="base")
        object_info = []
        bpy.ops.object.select_all(action="DESELECT")
        objects = list(bpy.data.objects)
        for obj in objects:
            # Skip anything that is not a renderable, non-empty mesh.
            if (
                obj.type != "MESH"
                or obj.data is None
                or len(obj.data.vertices) == 0
                or len(obj.data.polygons) == 0
                or obj.hide_render
                or obj not in list(bpy.context.view_layer.objects)
            ):
                continue

            # Sanitize names so they are valid as file names and URDF ids.
            obj_name = obj.name.replace("/", "_").replace("-", "_")
            obj_name = obj_name.replace("(", "_").replace(")", "")
            obj.name = obj_name
            export_subfolder = export_folder / obj_name
            export_subfolder.mkdir(exist_ok=True, parents=True)
            export_file = export_subfolder / f"{obj_name}.{format}"

            if "skirtingboard" in obj_name.lower():
                logger.info(f"Skipping skirting board {obj_name}")
                continue

            logger.info(f"Exporting file to {export_file=}")
            obj.hide_viewport = False

            # Record world pose; rotation comes from the fixed align_quat.
            position = obj.matrix_world.to_translation()
            rotation = Rotation.from_quat(align_quat)
            rotation = rotation.as_euler("xyz", degrees=False)

            obj.select_set(True)
            bpy.context.view_layer.objects.active = obj
            # Export each object at the origin; pose is restored via URDF.
            bpy.ops.object.location_clear()

            # Decimate very dense meshes before export, with a ratio chosen
            # by face count.
            face_count = len(obj.data.polygons)
            if face_count > 1000:
                if face_count > 1000000:
                    ratio = 0.005
                elif face_count > 100000:
                    ratio = 0.02
                elif face_count > 10000:
                    ratio = 0.1
                else:
                    ratio = 0.2
                angle_threshold = math.radians(5)
                bpy.ops.object.mode_set(mode="OBJECT")
                dec_mod = obj.modifiers.new(name="Decimate", type="DECIMATE")
                dec_mod.decimate_type = "DISSOLVE"
                dec_mod.angle_limit = angle_threshold
                dec_mod.use_collapse_triangulate = False
                dec_mod.ratio = ratio
                bpy.ops.object.modifier_apply(modifier=dec_mod.name)

            run_blender_export(
                export_file, format, vertex_colors, individual_export
            )
            obj.select_set(False)

            # Drop exports that produced an empty scene file.
            mesh = trimesh.load(export_file)
            if isinstance(mesh, trimesh.Scene) and len(mesh.geometry) == 0:
                shutil.rmtree(export_file.parent)
                continue

            object_info.append(
                {
                    "name": obj_name,
                    "mesh_path": f"{obj_name}/{obj_name}.{format}",
                    "mesh_abs_path": str(export_file),
                    "xyz": tuple(position),
                    "rpy": tuple(rotation),
                }
            )

        # Build the URDF: one link per object, fixed-jointed to "base".
        for obj in object_info:
            link = ET.SubElement(urdf_root, "link", name=obj["name"])
            visual = ET.SubElement(link, "visual")
            geom = ET.SubElement(visual, "geometry")
            ET.SubElement(
                geom, "mesh", filename=obj["mesh_path"], scale="1 1 1"
            )
            if deconvex:
                print("Deconvexing mesh for collision, waiting...")
                d_params = dict(
                    threshold=0.05, max_convex_hull=128, verbose=False
                )
                mesh_path = obj["mesh_abs_path"]
                output_path = mesh_path.replace(".obj", "_collision.obj")
                decompose_convex_mesh(mesh_path, output_path, **d_params)
                collision_mesh = obj["mesh_path"].replace(
                    ".obj", "_collision.obj"
                )
                collision = ET.SubElement(link, "collision")
                geom2 = ET.SubElement(collision, "geometry")
                ET.SubElement(
                    geom2, "mesh", filename=collision_mesh, scale="1 1 1"
                )

            joint = ET.SubElement(
                urdf_root, "joint", name=f"joint_{obj['name']}", type="fixed"
            )
            ET.SubElement(joint, "parent", link="base")
            ET.SubElement(joint, "child", link=obj["name"])
            ET.SubElement(
                joint,
                "origin",
                xyz="%.4f %.4f %.4f" % obj["xyz"],
                rpy="%.4f %.4f %.4f" % obj["rpy"],
            )

        urdf_str = minidom.parseString(ET.tostring(urdf_root)).toprettyxml(
            indent="  "
        )
        urdf_path = export_folder / "scene.urdf"
        with open(urdf_path, "w") as f:
            f.write(urdf_str)
        logger.info(f"URDF exported to {urdf_path}")

        return urdf_path
    else:
        logger.info(f"Exporting file to {export_file=}")
        run_blender_export(
            export_file, format, vertex_colors, individual_export
        )

        return export_file
|
| 1451 |
+
|
| 1452 |
+
|
| 1453 |
+
def main(args):
    """Export every ``.blend`` file under ``args.input_folder``.

    For each blend file the scene is opened in Blender, exported via
    ``export_scene``, and the resulting folder is zipped. A sibling
    ``solve_state`` file, if present, is copied as ``solve_state.json``.

    Args:
        args: Parsed CLI namespace from :func:`make_args`.
    """
    args.output_folder.mkdir(exist_ok=True)
    targets = sorted(list(args.input_folder.iterdir()))
    for blendfile in targets:
        if blendfile.stem == "solve_state":
            shutil.copy(blendfile, args.output_folder / "solve_state.json")

        if not blendfile.suffix == ".blend":
            print(f"Skipping non-blend file {blendfile}")
            continue

        # Load the scene into the running Blender instance.
        bpy.ops.wm.open_mainfile(filepath=str(blendfile))

        folder = export_scene(
            blendfile,
            args.output_folder,
            format=args.format,
            image_res=args.resolution,
            vertex_colors=args.vertex_colors,
            individual_export=args.individual,
            omniverse_export=args.omniverse,
            deconvex=args.deconvex,
            center_scene=args.center_scene,
        )
        # wanted to use shutil here but kept making corrupted files
        subprocess.call(
            ["zip", "-r", str(folder.with_suffix(".zip")), str(folder)]
        )

        bpy.ops.wm.quit_blender()
|
| 1483 |
+
|
| 1484 |
+
|
| 1485 |
+
def make_args():
    """Parse and validate CLI arguments for the scene exporter.

    Returns:
        argparse.Namespace: Validated export options.

    Raises:
        ValueError: If the chosen format does not support (or requires)
            vertex colors.
    """
    parser = argparse.ArgumentParser()

    # Required paths: previously optional, which deferred the failure to a
    # confusing AttributeError on ``None`` inside main().
    parser.add_argument("--input_folder", type=Path, required=True)
    parser.add_argument("--output_folder", type=Path, required=True)

    # ``choices`` already rejects invalid formats; ``required`` rejects a
    # missing one up front (previously caught later by a manual check).
    parser.add_argument(
        "-f", "--format", type=str, choices=FORMAT_CHOICES, required=True
    )

    parser.add_argument("-v", "--vertex_colors", action="store_true")
    parser.add_argument("-r", "--resolution", default=1024, type=int)
    parser.add_argument("-i", "--individual", action="store_true")
    parser.add_argument("-o", "--omniverse", action="store_true")
    parser.add_argument("--deconvex", action="store_true")
    parser.add_argument("--center_scene", action="store_true")

    args = parser.parse_args()

    # Cross-option validation that argparse cannot express directly.
    if args.vertex_colors and args.format not in ["ply", "fbx", "obj"]:
        raise ValueError("File format does not support vertex colors.")

    if args.format == "ply" and not args.vertex_colors:
        raise ValueError(".ply export must use vertex colors.")

    return args
|
| 1512 |
+
|
| 1513 |
+
|
| 1514 |
+
if __name__ == "__main__":
    # Script entry point: parse CLI options and export all blend files.
    args = make_args()
    main(args)
|
embodied_gen/scripts/room_gen/gen_room.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import os
|
| 19 |
+
import random
|
| 20 |
+
import shutil
|
| 21 |
+
import subprocess
|
| 22 |
+
import sys
|
| 23 |
+
import time
|
| 24 |
+
from dataclasses import dataclass
|
| 25 |
+
from enum import Enum
|
| 26 |
+
|
| 27 |
+
import tyro
|
| 28 |
+
from embodied_gen.utils.log import logger
|
| 29 |
+
|
| 30 |
+
EXEC_PYTHON = os.environ.get(
|
| 31 |
+
"BLENDER_PYTHON",
|
| 32 |
+
"thirdparty/infinigen/blender/4.2/python/bin/python3.11",
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class RoomType(str, Enum):
    """Room categories accepted by the Infinigen room generator.

    The values are the room names passed to Infinigen's
    ``restrict_solving.restrict_parent_rooms`` gin parameter; ``house``
    requests a full multi-room layout instead of a single room.
    """

    bedroom = "Bedroom"
    livingRoom = "LivingRoom"
    kitchen = "Kitchen"
    bathroom = "Bathroom"
    diningRoom = "DiningRoom"
    office = "Office"
    house = "House"
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class Complexity(str, Enum):
    """Scene furnishing complexity, from empty room to fully detailed.

    Each level toggles which object-solving passes (large/medium/small)
    Infinigen runs; see the mapping in ``generate_room``.
    """

    minimalist = "minimalist"
    simple = "simple"
    medium = "medium"
    detail = "detail"
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@dataclass
class GenRoomArgs:
    """Configuration for single-seed Infinigen room generation and export."""

    output_root: str
    """The base output directory for generated rooms."""

    room_type: RoomType = RoomType.kitchen
    """The type of room to generate."""

    seed: int | None = None
    """The specific seed number to generate."""

    # Task Switches (Default to True, use flags like --no-gen to disable)
    gen: bool = True
    """Whether to run the indoor generation task (generate_indoors)."""

    urdf: bool = True
    """Whether to export to URDF (requires generation output)."""

    usd: bool = True
    """Whether to export to USD (requires generation output)."""

    # Gin config copied into the Infinigen tree before generation.
    custom_params: str = "embodied_gen/scripts/room_gen/custom_solve.gin"

    large_scene: bool = False
    """If True, has_fewer_rooms=False for large scene generation."""

    complexity: Complexity = Complexity.simple
    """Complexity level: minimalist, simple, medium, or detail."""
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def run_command(cmd: list[str], task_name: str) -> None:
    """Run ``cmd`` as a child process, with timing and Ctrl+C handling.

    Uses ``Popen`` (rather than ``subprocess.run``) so the child process
    can be killed explicitly when the user interrupts with Ctrl+C.

    Args:
        cmd: Command and arguments to execute.
        task_name: Human-readable label used in log messages.

    Raises:
        SystemExit: Exits with code 0 on user interrupt, or code 1 when
            the child process returns a non-zero exit code.
    """
    logger.info(f"--> Running {task_name}...")
    start_time = time.time()
    process = subprocess.Popen(cmd)  # env=None was redundant; dropped
    try:
        return_code = process.wait()
        if return_code != 0:
            raise subprocess.CalledProcessError(return_code, cmd)

        elapsed_mins = (time.time() - start_time) / 60
        logger.info(
            f"--> {task_name} successfully in {elapsed_mins:.1f} mins."
        )

    except KeyboardInterrupt:
        # Propagate the interrupt to the child before exiting ourselves.
        logger.warning(f"\n[Interrupted] Stopping {task_name}...")
        process.kill()
        process.wait()
        sys.exit(0)

    except subprocess.CalledProcessError as e:
        # Log failures at error level (was logger.info) so they stand out.
        logger.error(
            f"Error occurred during {task_name}. Exit code: {e.returncode}"
        )
        sys.exit(1)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def generate_room(cfg: GenRoomArgs):
    """Generate an Infinigen indoor scene and export it to URDF/USD.

    Runs up to three subprocess stages, each gated by a flag on ``cfg``:
    1. ``cfg.gen``  — Infinigen coarse indoor generation into ``blender/``.
    2. ``cfg.urdf`` — export the blend output to per-object OBJ + URDF.
    3. ``cfg.usd``  — export the blend output to USD for Omniverse.

    Args:
        cfg: Parsed CLI configuration (room type, seed, complexity, and
            task switches).
    """
    room_type = cfg.room_type.value
    seed = cfg.seed
    if seed is None:
        # No seed given: pick one at random so reruns produce new rooms.
        seed = random.randint(0, 100000)

    blender_dir = f"{cfg.output_root}/{room_type}_seed{seed}/blender"
    logger.info(
        f"{room_type} | Seed {seed}: Gen={cfg.gen}, URDF={cfg.urdf}, USD={cfg.usd}"
    )

    # Complexity configuration mapping: which Infinigen object-solving
    # passes (large/medium/small furniture) are enabled per level.
    complexity_config = {
        Complexity.minimalist: {
            "compose_indoors.solve_large_enabled": False,
            "compose_indoors.solve_medium_enabled": False,
            "compose_indoors.solve_small_enabled": False,
        },
        Complexity.simple: {
            "compose_indoors.solve_large_enabled": True,
            "compose_indoors.solve_medium_enabled": False,
            "compose_indoors.solve_small_enabled": False,
        },
        Complexity.medium: {
            "compose_indoors.solve_large_enabled": True,
            "compose_indoors.solve_medium_enabled": True,
            "compose_indoors.solve_small_enabled": False,
        },
        Complexity.detail: {
            "compose_indoors.solve_large_enabled": True,
            "compose_indoors.solve_medium_enabled": True,
            "compose_indoors.solve_small_enabled": True,
        },
    }

    # Get complexity settings
    complexity_settings = complexity_config[cfg.complexity]
    time_cost_info = {
        Complexity.minimalist: "~1mins",
        Complexity.simple: "~10mins",
        Complexity.medium: "~20mins",
        Complexity.detail: "~70mins",
    }
    logger.info(
        f"Complexity: {cfg.complexity.value} (estimated time: {time_cost_info[cfg.complexity]})"
    )

    if cfg.gen:
        # Infinigen only picks up gin configs from its own tree, so copy
        # our custom config there first.
        dst_gin = "thirdparty/infinigen/infinigen_examples/configs_indoor/custom_solve.gin"
        shutil.copy(cfg.custom_params, dst_gin)
        cmd_generate = [
            EXEC_PYTHON,
            "embodied_gen/scripts/room_gen/run_generate_indoors.py",
            "--seed",
            str(seed),
            "--task",
            "coarse",
            "--output_folder",
            blender_dir,
            "-g",
            "custom_solve.gin",
        ]
        if room_type == "House":
            # Full-house layout; large_scene disables the fewer-rooms cap.
            has_fewer_rooms_value = "False" if cfg.large_scene else "True"
            cmd_generate.append("-p")
            cmd_generate.append(
                f'home_room_constraints.has_fewer_rooms={has_fewer_rooms_value}'
            )
        else:
            # Single-room generation restricted to the requested room type.
            cmd_generate.append("-p")
            cmd_generate.append(
                f'restrict_solving.restrict_parent_rooms=["{room_type}"]'
            )
            cmd_generate.append("restrict_solving.solve_max_rooms=1")
            if room_type == "Office":
                cmd_generate.append("home_room_constraints.office_only=True")
        cmd_generate.append(
            f"compose_indoors.solve_large_enabled={complexity_settings['compose_indoors.solve_large_enabled']}"
        )
        cmd_generate.append(
            f"compose_indoors.solve_medium_enabled={complexity_settings['compose_indoors.solve_medium_enabled']}"
        )
        cmd_generate.append(
            f"compose_indoors.solve_small_enabled={complexity_settings['compose_indoors.solve_small_enabled']}"
        )
        run_command(cmd_generate, "Room Generation")

    if cfg.urdf:
        # Exporting requires prior generation output unless gen ran now.
        if not os.path.exists(blender_dir) and not cfg.gen:
            logger.warning(f"Warning: {blender_dir} not found. Skipping URDF.")
        else:
            cmd_export_urdf = [
                EXEC_PYTHON,
                "embodied_gen/scripts/room_gen/export_scene.py",
                "--input_folder",
                blender_dir,
                "--output_folder",
                f"{cfg.output_root}/{room_type}_seed{seed}/urdf",
                "-f",
                "obj",
                "-r",
                "512",
                "--individual",
                "--deconvex",
                "--center_scene",
            ]
            run_command(cmd_export_urdf, "Export URDF")

    if cfg.usd:
        if not os.path.exists(blender_dir) and not cfg.gen:
            logger.warning(f"Warning: {blender_dir} not found. Skipping USD.")
        else:
            cmd_export_usd = [
                EXEC_PYTHON,
                "embodied_gen/scripts/room_gen/export_scene.py",
                "--input_folder",
                blender_dir,
                "--output_folder",
                f"{cfg.output_root}/{room_type}_seed{seed}/usd",
                "-f",
                "usdc",
                "-r",
                "512",
                "--omniverse",
                "--center_scene",
            ]
            run_command(cmd_export_usd, "Export USD")

    logger.info(f"\n=== Completed {room_type} Seed {seed} ===")
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
if __name__ == "__main__":
    # Script entry point: parse CLI via tyro and run the pipeline,
    # exiting cleanly on user interrupt.
    try:
        cfg = tyro.cli(GenRoomArgs)
        generate_room(cfg)
    except KeyboardInterrupt:
        logger.info("\nProgram interrupted by user (Cmd+C). Exiting.")
        sys.exit(0)
|
embodied_gen/scripts/room_gen/route_room.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
import random
|
| 21 |
+
|
| 22 |
+
import json_repair
|
| 23 |
+
from embodied_gen.utils.gpt_clients import GPT_CLIENT, GPTclient
|
| 24 |
+
from embodied_gen.utils.log import logger
|
| 25 |
+
|
| 26 |
+
__all__ = [
|
| 27 |
+
"InfinigenGenRouter",
|
| 28 |
+
"DEFAULT_ROUTER_PROMPT",
|
| 29 |
+
]
|
| 30 |
+
|
| 31 |
+
# Template prompt for the room-routing LLM call. `{prompt}` is filled with the
# user's task description by InfinigenGenRouter.query(). The model is expected
# to answer with a JSON 2D array: [["Room", ...], "complexity"].
# Fix: "in an simple room" -> "in a simple room" (grammar typo in Example 2).
DEFAULT_ROUTER_PROMPT = """
You are given a natural-language description of a household task or an indoor
scene involving objects.

Select all rooms from the predefined list below where this task or scene could
plausibly occur:
["Bedroom", "LivingRoom", "Kitchen", "Bathroom", "DiningRoom", "Office"]

Rules:
1. Output must be a valid JSON nested array (2D list).
   Format: [["Room_List"], "Complexity_Level"]

2. Room Selection Logic (Index 0):
   - Standard Case: If the task is contained within specific rooms, select
     the relevant room name(s).
   - Special Case: If the task involves moving objects between different rooms
     (navigation/transport) or implies generating/referencing a complete house
     layout, use ["House"].
   - If no rooms are suitable and it is not a "House" case, randomly select
     one room.

3. Complexity Evaluation Logic (Index 1):
   - General Rule: Default to "medium".
   - Overrides (Apply these only if the description fits the specific
     criteria below):
     - "minimalist": If explicitly stated as minimalist.
     - "simple": If the scene is explicitly described as "simple", "basic".
     - "detail": If the scene is described as "complex", "detailed".

4. Do not include any explanations or additional text.

Example 1:
Task: minimalist/empty Bedroom.
Answer: [["Bedroom"], "minimalist"]

Example 2:
Task: Wiping the table in a simple room.
Answer: [["DiningRoom", "Kitchen"], "simple"]

Example 3:
Task: Take the vase from the living room shelf and navigate to the bedroom
to pack it.
Answer: [["House"], "medium"]

Example 4:
Task: Put the apple into the fruit bowl in a complex/detailed env.
Answer: [["Kitchen", "DiningRoom", "LivingRoom"], "detail"]

Task: {prompt}
Answer:
"""
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class InfinigenGenRouter:
    """Routes a natural-language task to a target room and complexity level.

    Fills a prompt template with the task description, queries the given
    GPT client, parses the JSON-ish reply, and picks one room at random
    from the rooms the model returned.
    """

    def __init__(
        self,
        gpt_client: GPTclient,
        prompt: str | None = None,
    ) -> None:
        """Set up the router.

        Args:
            gpt_client: Client used to query the LLM.
            prompt: Custom template prompt; falls back to
                DEFAULT_ROUTER_PROMPT when None.

        """
        self.gpt_client = gpt_client
        # Short-circuit: only touch the module-level default when no
        # custom template was supplied.
        self.prompt = DEFAULT_ROUTER_PROMPT if prompt is None else prompt

    def query(self, task_description: str) -> tuple[str, str]:
        """Resolve a task description into (room_name, complexity).

        Args:
            task_description: Natural-language description of the task or scene.

        Returns:
            Tuple of (room_name, complexity_level); room_name is drawn at
            random from the room list the LLM produced.

        """
        request = self.prompt.format(prompt=task_description)
        raw_answer = self.gpt_client.query(text_prompt=request)
        # json_repair tolerates slightly malformed JSON from the model.
        decoded = json_repair.loads(raw_answer)
        candidate_rooms, complexity = decoded[0], decoded[1]
        return random.choice(candidate_rooms), complexity
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def main() -> None:
    """Demo entry point: route one sample task with the default GPT client."""
    router = InfinigenGenRouter(gpt_client=GPT_CLIENT)
    selected_room, level = router.query(
        "Put the apple into the fruit bowl, complex env"
    )
    logger.info(f"Room: {selected_room}, Complexity: {level}.")
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# Script entry point for quick manual testing of the router.
if __name__ == "__main__":
    main()
|
embodied_gen/scripts/room_gen/run_generate_indoors.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Wrapper to run infinigen_examples.generate_indoors with correct gin registration.
# Importing (rather than python -m) ensures the module loads with full module path,
# avoiding the "Ambiguous selector 'compose_indoors'" error from __main__.
from embodied_gen.utils.monkey_patch.infinigen import (
    add_run_main_to_module,
    monkey_patch_infinigen,
)

# NOTE: the patch must be applied BEFORE importing generate_indoors below,
# so the patched infinigen internals are in place at module-import time.
monkey_patch_infinigen()

import infinigen_examples.generate_indoors as gi

# Attach a `_run_main` callable to the imported module, then invoke it —
# this mimics `python -m` execution without the __main__ selector ambiguity.
add_run_main_to_module(gi)
gi._run_main()
|
embodied_gen/scripts/room_gen/visualize_floorplan.py
ADDED
|
@@ -0,0 +1,1186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
|
| 19 |
+
import logging
|
| 20 |
+
import os
|
| 21 |
+
import random
|
| 22 |
+
import re
|
| 23 |
+
import xml.etree.ElementTree as ET
|
| 24 |
+
from dataclasses import dataclass, field
|
| 25 |
+
from shutil import copy2, copytree
|
| 26 |
+
from typing import TYPE_CHECKING, Literal
|
| 27 |
+
|
| 28 |
+
import matplotlib.pyplot as plt
|
| 29 |
+
import numpy as np
|
| 30 |
+
import trimesh
|
| 31 |
+
import tyro
|
| 32 |
+
from scipy.spatial.transform import Rotation as R
|
| 33 |
+
from shapely.affinity import translate
|
| 34 |
+
from shapely.geometry import MultiPoint, MultiPolygon, Point, Polygon
|
| 35 |
+
from shapely.ops import unary_union
|
| 36 |
+
|
| 37 |
+
if TYPE_CHECKING:
|
| 38 |
+
from matplotlib.axes import Axes
|
| 39 |
+
|
| 40 |
+
# Module-wide logging setup; a module-level logger per stdlib convention.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(message)s",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

# Type aliases
# A 2D footprint may be a single Polygon or a MultiPolygon after unions.
Geometry = Polygon | MultiPolygon

# Constants
DEFAULT_MESH_SAMPLE_NUM = 50000  # points sampled per mesh for footprints
DEFAULT_IGNORE_ITEMS = ("ceiling", "light", "exterior")  # name patterns skipped when parsing
DEFAULT_ROTATION_RPY = (1.57, 0.0, 0.0)  # default asset orientation (radians)
DEFAULT_MAX_PLACEMENT_ATTEMPTS = 2000  # retry budget for random placement

__all__ = [
    "points_to_polygon",
    "get_actionable_surface",
    "FloorplanVisualizer",
    "UrdfSemanticInfoCollector",
    "Scene3DGenConfig",
]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@dataclass
class Scene3DGenConfig:
    """Configuration for 3D scene generation and floorplan visualization."""

    # NOTE: the bare-string attribute docstrings below are kept on purpose —
    # presumably tyro surfaces them as CLI --help text; verify before removing.

    urdf_path: str
    """Path to the input URDF scene file."""

    output_path: str
    """Path to save the floorplan visualization image."""

    # Optional paths
    usd_path: str | None = None
    """Optional path to the USD scene file for USD export."""

    asset_path: str | None = None
    """Optional path to the asset mesh to add to the scene."""

    # Instance configuration
    instance_key: str = "inserted_object"
    """Unique key for the added instance."""

    in_room: str | None = None
    """Optional room name to constrain asset placement."""

    on_instance: str | None = None
    """Optional instance name to place the asset on top of."""

    place_strategy: Literal["top", "random"] = "random"
    """Placement strategy for the asset."""

    rotation_rpy: tuple[float, float, float] = DEFAULT_ROTATION_RPY
    """Rotation in roll-pitch-yaw (radians)."""

    # Collector configuration
    ignore_items: list[str] = field(
        default_factory=lambda: list(DEFAULT_IGNORE_ITEMS)
    )
    """List of item name patterns to ignore during parsing."""

    mesh_sample_num: int = DEFAULT_MESH_SAMPLE_NUM
    """Number of points to sample from meshes."""

    max_placement_attempts: int = DEFAULT_MAX_PLACEMENT_ATTEMPTS
    """Maximum attempts for asset placement."""

    # Output flags
    update_urdf: bool = True
    """Whether to update and save the URDF file."""

    update_usd: bool = True
    """Whether to update and save the USD file."""
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def points_to_polygon(
    points: np.ndarray,
    smooth_thresh: float = 0.2,
    scanline_step: float = 0.01,
) -> Polygon:
    """Convert point clouds into polygon contours using a sweep line algorithm.

    The Y range is divided into horizontal strips of width ``scanline_step``;
    for each occupied strip the min/max X form the lower/upper contour. The
    resulting polygon is smoothed with a close-open buffer pass.

    Args:
        points: Array of 2D points with shape (N, 2).
        smooth_thresh: Buffer threshold for smoothing the polygon.
        scanline_step: Step size for the scanline sweep.

    Returns:
        A Shapely Polygon representing the contour of the point cloud.

    """
    if len(points) == 0:
        return Polygon()

    ys = points[:, 1]
    y_min = ys.min()

    # Vectorized strip assignment: one pass over the points instead of
    # re-masking the full array for every scanline (O(n log n) vs O(n*strips)).
    bin_ids = np.floor((ys - y_min) / scanline_step).astype(np.int64)

    order = np.argsort(bin_ids, kind="stable")
    sorted_bins = bin_ids[order]
    sorted_x = points[order, 0]

    # Start index of each occupied strip in the sorted arrays.
    starts = np.concatenate(([0], np.flatnonzero(np.diff(sorted_bins)) + 1))
    strip_ids = sorted_bins[starts]
    x_max = np.maximum.reduceat(sorted_x, starts)
    x_min = np.minimum.reduceat(sorted_x, starts)

    strip_y = y_min + strip_ids * scanline_step
    upper = np.column_stack([x_max, strip_y])
    lower = np.column_stack([x_min, strip_y])

    # Walk up the right side, then back down the left side.
    contour = np.vstack([upper, lower[::-1]])
    if len(contour) < 3:
        # Fewer than two occupied strips cannot form a valid ring.
        return Polygon()

    poly = Polygon(contour)
    # Dilate then erode by the same amount to smooth jagged scanline edges.
    return poly.buffer(smooth_thresh).buffer(-smooth_thresh)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def get_actionable_surface(
    mesh: trimesh.Trimesh,
    tol_angle: int = 10,
    tol_z: float = 0.02,
    area_tolerance: float = 0.15,
    place_strategy: Literal["top", "random"] = "random",
) -> tuple[float, Geometry]:
    """Extract the actionable (placeable) surface from a mesh.

    Finds upward-facing surfaces and returns the best one based on the
    placement strategy.

    Args:
        mesh: The input trimesh object.
        tol_angle: Angle tolerance in degrees for detecting up-facing normals.
        tol_z: Z-coordinate tolerance for clustering faces.
        area_tolerance: Tolerance for selecting candidate surfaces by area.
        place_strategy: Either "top" (highest surface) or "random".

    Returns:
        A tuple of (z_height, surface_polygon) representing the selected
        actionable surface.

    """
    # A face counts as "up-facing" when its normal is within tol_angle of +Z.
    up_vec = np.array([0, 0, 1])
    dots = np.dot(mesh.face_normals, up_vec)
    valid_mask = dots > np.cos(np.deg2rad(tol_angle))

    if not np.any(valid_mask):
        # Degenerate mesh (e.g. all vertical faces): use the top of the
        # bounding box and the XY convex hull of all vertices as the surface.
        logger.warning(
            "No up-facing surfaces found. Falling back to bounding box top."
        )
        verts = mesh.vertices[:, :2]
        return mesh.bounds[1][2], MultiPoint(verts).convex_hull

    valid_faces_indices = np.where(valid_mask)[0]
    face_z = mesh.triangles_center[valid_mask][:, 2]
    face_areas = mesh.area_faces[valid_mask]

    # Group the up-facing faces into horizontal layers within tol_z of
    # each other (e.g. a table top vs a lower shelf).
    z_clusters = _cluster_faces_by_z(
        face_z, face_areas, valid_faces_indices, tol_z
    )

    if not z_clusters:
        # Same fallback as above: bounding-box top + full-vertex convex hull.
        return mesh.bounds[1][2], MultiPoint(mesh.vertices[:, :2]).convex_hull

    # Pick one layer by area + strategy (highest vs random among large ones).
    selected_z, selected_data = _select_surface_cluster(
        z_clusters, area_tolerance, place_strategy
    )

    # Rebuild a mesh from just the selected layer's faces and sample it to
    # approximate the surface outline in XY.
    cluster_faces = mesh.faces[selected_data["indices"]]
    temp_mesh = trimesh.Trimesh(vertices=mesh.vertices, faces=cluster_faces)
    samples, _ = trimesh.sample.sample_surface(temp_mesh, 10000)

    if len(samples) < 3:
        # Too few samples to form a hull (tiny/degenerate layer).
        logger.warning(
            f"Failed to sample enough points on layer Z={selected_z}. "
            "Returning empty polygon."
        )
        return selected_z, Polygon()

    surface_poly = MultiPoint(samples[:, :2]).convex_hull
    return selected_z, surface_poly
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def _cluster_faces_by_z(
    face_z: np.ndarray,
    face_areas: np.ndarray,
    face_indices: np.ndarray,
    tol_z: float,
) -> dict[float, dict]:
    """Bucket mesh faces into horizontal layers keyed by quantized Z.

    Args:
        face_z: Z coordinates of face centers.
        face_areas: Areas of each face.
        face_indices: Original indices of the faces.
        tol_z: Tolerance for Z clustering (bucket width).

    Returns:
        Dictionary mapping quantized Z values to cluster data
        ("area": accumulated face area, "indices": original face indices).

    """
    clusters: dict[float, dict] = {}

    # Quantize each Z to the nearest multiple of tol_z so nearby faces
    # land in the same bucket.
    for z, area, original_idx in zip(face_z, face_areas, face_indices):
        bucket_key = round(z / tol_z) * tol_z
        bucket = clusters.setdefault(bucket_key, {"area": 0.0, "indices": []})
        bucket["area"] += area
        bucket["indices"].append(original_idx)

    return clusters
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def _select_surface_cluster(
    z_clusters: dict[float, dict],
    area_tolerance: float,
    place_strategy: Literal["top", "random"],
) -> tuple[float, dict]:
    """Pick one Z layer among the clusters according to the strategy.

    Args:
        z_clusters: Dictionary of Z clusters with area and indices.
        area_tolerance: Tolerance for candidate selection by area.
        place_strategy: Either "top" or "random".

    Returns:
        Tuple of (selected_z, cluster_data).

    """
    # Candidates are all layers whose area is within area_tolerance of the
    # largest layer's area.
    max_area = max(c["area"] for c in z_clusters.values())
    area_floor = max_area * (1.0 - area_tolerance)
    candidates = [
        (z, data) for z, data in z_clusters.items() if data["area"] >= area_floor
    ]

    if not candidates:
        # Defensive fallback: keep at least the single largest layer.
        candidates = [max(z_clusters.items(), key=lambda item: item[1]["area"])]

    if place_strategy == "random":
        selected_z, selected_data = random.choice(candidates)
        logger.info(
            f"Strategy 'random': Selected Z={selected_z:.3f} "
            f"(Area={selected_data['area']:.3f}) "
            f"from {len(candidates)} candidates."
        )
        return selected_z, selected_data

    # "top": take the candidate with the highest Z.
    selected_z, selected_data = max(candidates, key=lambda item: item[0])
    logger.info(
        f"Strategy 'top': Selected highest Z={selected_z:.3f} "
        f"(Area={selected_data['area']:.3f})"
    )
    return selected_z, selected_data
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
class FloorplanVisualizer:
    """Static utility class for visualizing floorplans.

    Rendering is layered via matplotlib zorder: room floors (1), occupied
    area overlay (2), footprint outlines (3), footprint labels (4), room
    labels (5).
    """

    @staticmethod
    def draw_poly(ax: Axes, poly: Geometry, **kwargs) -> None:
        """Draw a polygon or multi-polygon on matplotlib axes.

        Only the exterior ring of each polygon is filled; interior holes
        are not rendered.

        Args:
            ax: Matplotlib axes object.
            poly: Shapely Polygon or MultiPolygon to draw.
            **kwargs: Additional arguments passed to ax.fill().

        """
        if poly.is_empty:
            return

        # Normalize Polygon vs MultiPolygon to a uniform sequence.
        geoms = poly.geoms if hasattr(poly, "geoms") else [poly]

        color = kwargs.pop("color", None)
        if color is None:
            # No color given: assign one tab10 color per sub-geometry.
            # NOTE(review): tab10 has 10 entries; integer lookups beyond
            # that presumably clamp to the last color — confirm if >10
            # sub-geometries are expected.
            cmap = plt.get_cmap("tab10")
            colors = [cmap(i) for i in range(len(geoms))]
        else:
            colors = [color] * len(geoms)

        for i, p in enumerate(geoms):
            if p.is_empty:
                continue
            x, y = p.exterior.xy
            ax.fill(x, y, facecolor=colors[i], **kwargs)

    @classmethod
    def plot(
        cls,
        rooms: dict[str, Geometry],
        footprints: dict[str, Geometry],
        occ_area: Geometry,
        save_path: str,
    ) -> None:
        """Generate and save a floorplan visualization.

        Args:
            rooms: Dictionary mapping room names to floor polygons.
            footprints: Dictionary mapping object names to footprint polygons.
            occ_area: Union of all occupied areas.
            save_path: Path to save the output image.

        """
        fig, ax = plt.subplots(figsize=(10, 10))
        ax.set_aspect("equal")
        cmap_rooms = plt.get_cmap("Pastel1")

        # Draw layers bottom-up; each helper uses its own fixed zorder.
        cls._draw_room_floors(ax, rooms, cmap_rooms)
        cls._draw_occupied_area(ax, occ_area)
        cls._draw_footprint_outlines(ax, footprints)
        cls._draw_footprint_labels(ax, footprints)
        cls._draw_room_labels(ax, rooms)
        cls._configure_axes(ax, rooms, occ_area)

        plt.tight_layout()
        plt.savefig(save_path, dpi=300)
        plt.close(fig)  # release the figure to avoid accumulating memory
        logger.info(f"Saved floorplan to: {save_path}")

    @classmethod
    def _draw_room_floors(
        cls,
        ax: Axes,
        rooms: dict[str, Geometry],
        # NOTE(review): this receives the Colormap returned by
        # plt.get_cmap(); the ScalarMappable annotation looks inaccurate.
        cmap: plt.cm.ScalarMappable,
    ) -> None:
        """Draw colored room floor polygons (Layer 1)."""
        for i, (name, poly) in enumerate(rooms.items()):
            # Cycle through the colormap's discrete entries per room.
            color = cmap(i % cmap.N)
            cls.draw_poly(
                ax,
                poly,
                color=color,
                alpha=0.6,
                edgecolor="black",
                linestyle="--",
                zorder=1,
            )

    @classmethod
    def _draw_occupied_area(cls, ax: Axes, occ_area: Geometry) -> None:
        """Draw the occupied area overlay (Layer 2)."""
        cls.draw_poly(
            ax,
            occ_area,
            color="tab:blue",
            alpha=0.3,
            lw=0,
            zorder=2,
        )

    @staticmethod
    def _draw_footprint_outlines(
        ax: Axes,
        footprints: dict[str, Geometry],
    ) -> None:
        """Draw footprint outlines (Layer 3)."""
        for poly in footprints.values():
            if poly.is_empty:
                continue
            geoms = poly.geoms if hasattr(poly, "geoms") else [poly]
            for p in geoms:
                ax.plot(*p.exterior.xy, "--", lw=0.8, color="gray", zorder=3)

    @staticmethod
    def _draw_footprint_labels(
        ax: Axes,
        footprints: dict[str, Geometry],
    ) -> None:
        """Draw footprint text labels at each footprint centroid (Layer 4)."""
        for name, poly in footprints.items():
            if poly.is_empty:
                continue
            ax.text(
                poly.centroid.x,
                poly.centroid.y,
                name,
                fontsize=5,
                ha="center",
                va="center",
                bbox={
                    "facecolor": "white",
                    "alpha": 0.5,
                    "edgecolor": "none",
                    "pad": 0.1,
                },
                zorder=4,
            )

    @staticmethod
    def _draw_room_labels(ax: Axes, rooms: dict[str, Geometry]) -> None:
        """Draw room text labels at each room centroid (Layer 5)."""
        for name, poly in rooms.items():
            if poly.is_empty:
                continue
            # Room keys carry a "_floor" suffix; strip it for display.
            label = name.replace("_floor", "")
            ax.text(
                poly.centroid.x,
                poly.centroid.y,
                label,
                fontsize=9,
                color="black",
                weight="bold",
                ha="center",
                va="center",
                bbox={
                    "facecolor": "lightgray",
                    "alpha": 0.7,
                    "edgecolor": "black",
                    "boxstyle": "round,pad=0.3",
                },
                zorder=5,
            )

    @staticmethod
    def _configure_axes(
        ax: Axes,
        rooms: dict[str, Geometry],
        occ_area: Geometry,
    ) -> None:
        """Configure axes limits (with margin) and labels."""
        total_geom = unary_union(list(rooms.values()) + [occ_area])

        if total_geom.is_empty:
            # Nothing to show: fall back to a fixed 2x2 view.
            minx, miny, maxx, maxy = -1, -1, 1, 1
        else:
            minx, miny, maxx, maxy = total_geom.bounds

        # 5% margin, but never less than half a meter on each side.
        margin_x = max((maxx - minx) * 0.05, 0.5)
        margin_y = max((maxy - miny) * 0.05, 0.5)

        ax.set_xlim(minx - margin_x, maxx + margin_x)
        ax.set_ylim(miny - margin_y, maxy + margin_y)
        ax.set_title("Floorplan Analysis", fontsize=14)
        ax.set_xlabel("X (m)")
        ax.set_ylabel("Y (m)")
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
class UrdfSemanticInfoCollector:
    """Collector for URDF semantic information.

    Parses URDF files to extract room layouts, object footprints, and
    provides methods for adding new instances and updating URDF/USD files.

    Attributes:
        mesh_sample_num: Number of points to sample from meshes.
        ignore_items: List of item name patterns to ignore.
        instances: Dictionary of instance name to footprint polygon.
        instance_meta: Dictionary of instance metadata (mesh path, pose).
        rooms: Dictionary of room polygons.
        footprints: Dictionary of object footprints.
        occ_area: Union of all occupied areas.
        floor_union: Union of all floor polygons.

    """

    def __init__(
        self,
        mesh_sample_num: int = DEFAULT_MESH_SAMPLE_NUM,
        ignore_items: list[str] | None = None,
    ) -> None:
        """Initialize the collector.

        Args:
            mesh_sample_num: Number of points to sample from meshes.
            ignore_items: List of item name patterns to ignore during parsing.

        """
        self.mesh_sample_num = mesh_sample_num
        # Copy the module default so callers never share/mutate it.
        self.ignore_items = ignore_items or list(DEFAULT_IGNORE_ITEMS)

        # Semantic state; populated by collect().
        self.instances: dict[str, Polygon] = {}
        self.instance_meta: dict[str, dict] = {}
        self.rooms: dict[str, Geometry] = {}
        self.footprints: dict[str, Geometry] = {}
        self.occ_area: Geometry = Polygon()
        self.floor_union: Geometry = Polygon()

        # XML handles for the currently loaded URDF document.
        self.urdf_path: str = ""
        self._tree: ET.ElementTree | None = None
        self._root: ET.Element | None = None
|
| 528 |
+
|
| 529 |
+
def _get_transform(
|
| 530 |
+
self,
|
| 531 |
+
joint_elem: ET.Element,
|
| 532 |
+
) -> tuple[np.ndarray, np.ndarray]:
|
| 533 |
+
"""Extract transform (xyz, rpy) from a joint element.
|
| 534 |
+
|
| 535 |
+
Args:
|
| 536 |
+
joint_elem: XML Element representing a URDF joint.
|
| 537 |
+
|
| 538 |
+
Returns:
|
| 539 |
+
Tuple of (xyz, rpy) arrays.
|
| 540 |
+
|
| 541 |
+
"""
|
| 542 |
+
origin = joint_elem.find("origin")
|
| 543 |
+
if origin is not None:
|
| 544 |
+
xyz = np.fromstring(origin.attrib.get("xyz", "0 0 0"), sep=" ")
|
| 545 |
+
rpy = np.fromstring(origin.attrib.get("rpy", "0 0 0"), sep=" ")
|
| 546 |
+
else:
|
| 547 |
+
xyz, rpy = np.zeros(3), np.zeros(3)
|
| 548 |
+
return xyz, rpy
|
| 549 |
+
|
| 550 |
+
def _process_mesh_to_poly(
|
| 551 |
+
self,
|
| 552 |
+
mesh_path: str,
|
| 553 |
+
xyz: np.ndarray,
|
| 554 |
+
rpy: np.ndarray,
|
| 555 |
+
) -> Polygon:
|
| 556 |
+
"""Load mesh file and convert to 2D footprint polygon.
|
| 557 |
+
|
| 558 |
+
Args:
|
| 559 |
+
mesh_path: Path to the mesh file.
|
| 560 |
+
xyz: Translation vector.
|
| 561 |
+
rpy: Rotation in roll-pitch-yaw.
|
| 562 |
+
|
| 563 |
+
Returns:
|
| 564 |
+
Footprint polygon of the mesh.
|
| 565 |
+
|
| 566 |
+
"""
|
| 567 |
+
if not os.path.exists(mesh_path):
|
| 568 |
+
return Polygon()
|
| 569 |
+
|
| 570 |
+
mesh = trimesh.load(mesh_path, force="mesh", skip_materials=True)
|
| 571 |
+
|
| 572 |
+
matrix = np.eye(4)
|
| 573 |
+
matrix[:3, :3] = R.from_euler("xyz", rpy).as_matrix()
|
| 574 |
+
matrix[:3, 3] = xyz
|
| 575 |
+
mesh.apply_transform(matrix)
|
| 576 |
+
|
| 577 |
+
verts = np.asarray(mesh.sample(self.mesh_sample_num))[:, :2]
|
| 578 |
+
return points_to_polygon(verts)
|
| 579 |
+
|
| 580 |
+
    def collect(self, urdf_path: str) -> None:
        """Parse URDF file and collect semantic information.

        Reads every link's visual mesh along with the joint transforms
        attached to it, then refreshes the derived rooms/footprints state.

        Args:
            urdf_path: Path to the URDF file.

        """
        logger.info(f"Collecting URDF semantic info from {urdf_path}")
        self.urdf_path = urdf_path
        # Mesh filenames in the URDF are relative to this directory.
        urdf_dir = os.path.dirname(urdf_path)

        self._tree = ET.parse(urdf_path)
        self._root = self._tree.getroot()

        # Joint origins give each child link its pose in the scene.
        link_transforms = self._build_link_transforms()
        self._process_links(urdf_dir, link_transforms)
        self._update_internal_state()
|
| 597 |
+
|
| 598 |
+
def _build_link_transforms(
|
| 599 |
+
self,
|
| 600 |
+
) -> dict[str, tuple[np.ndarray, np.ndarray]]:
|
| 601 |
+
"""Build mapping from link names to their transforms.
|
| 602 |
+
|
| 603 |
+
Returns:
|
| 604 |
+
Dictionary mapping link names to (xyz, rpy) tuples.
|
| 605 |
+
|
| 606 |
+
"""
|
| 607 |
+
link_transforms: dict[str, tuple[np.ndarray, np.ndarray]] = {}
|
| 608 |
+
|
| 609 |
+
for joint in self._tree.findall("joint"):
|
| 610 |
+
child = joint.find("child")
|
| 611 |
+
if child is not None:
|
| 612 |
+
link_name = child.attrib["link"]
|
| 613 |
+
link_transforms[link_name] = self._get_transform(joint)
|
| 614 |
+
|
| 615 |
+
return link_transforms
|
| 616 |
+
|
| 617 |
+
    def _process_links(
        self,
        urdf_dir: str,
        link_transforms: dict[str, tuple[np.ndarray, np.ndarray]],
    ) -> None:
        """Process all links in the URDF tree.

        Builds a 2D footprint polygon for every link with a visual mesh;
        wall links are merged into a single "walls" entry.

        Args:
            urdf_dir: Directory containing the URDF file.
            link_transforms: Dictionary of link transforms.

        """
        self.instances = {}
        self.instance_meta = {}
        wall_polys: list[Polygon] = []

        for link in self._tree.findall("link"):
            # Lowercased name is used for ignore/wall matching only.
            name = link.attrib.get("name", "").lower()
            if any(ign in name for ign in self.ignore_items):
                continue

            visual = link.find("visual")
            if visual is None:
                continue

            mesh_node = visual.find("geometry/mesh")
            if mesh_node is None:
                continue

            # Mesh filenames in the URDF are relative to the URDF directory.
            mesh_path = os.path.join(urdf_dir, mesh_node.attrib["filename"])
            default_transform = (np.zeros(3), np.zeros(3))
            xyz, rpy = link_transforms.get(
                link.attrib["name"], default_transform
            )

            poly = self._process_mesh_to_poly(mesh_path, xyz, rpy)
            if poly.is_empty:
                continue

            if "wall" in name:
                wall_polys.append(poly)
            else:
                key = self._process_safe_key_robust(link.attrib["name"])
                self.instances[key] = poly
                # Keep the source mesh and pose so the parent surface can
                # be re-loaded later (see _resolve_placement_target).
                self.instance_meta[key] = {
                    "mesh_path": mesh_path,
                    "xyz": xyz,
                    "rpy": rpy,
                }

        # All wall pieces collapse into one merged geometry entry.
        self.instances["walls"] = unary_union(wall_polys)
|
| 668 |
+
|
| 669 |
+
def _update_internal_state(self) -> None:
|
| 670 |
+
"""Update derived state (rooms, footprints, occupied area)."""
|
| 671 |
+
self.rooms = {
|
| 672 |
+
k: v
|
| 673 |
+
for k, v in self.instances.items()
|
| 674 |
+
if "_floor" in k.lower() and not v.is_empty
|
| 675 |
+
}
|
| 676 |
+
|
| 677 |
+
self.footprints = {
|
| 678 |
+
k: v
|
| 679 |
+
for k, v in self.instances.items()
|
| 680 |
+
if k != "walls"
|
| 681 |
+
and "_floor" not in k.lower()
|
| 682 |
+
and "rug" not in k.lower()
|
| 683 |
+
and not v.is_empty
|
| 684 |
+
}
|
| 685 |
+
self.occ_area = unary_union(list(self.footprints.values()))
|
| 686 |
+
self.floor_union = unary_union(list(self.rooms.values()))
|
| 687 |
+
|
| 688 |
+
def _process_safe_key_robust(self, name: str) -> str:
|
| 689 |
+
"""Convert a link name to a safe, normalized key.
|
| 690 |
+
|
| 691 |
+
Args:
|
| 692 |
+
name: Original link name.
|
| 693 |
+
|
| 694 |
+
Returns:
|
| 695 |
+
Normalized key string.
|
| 696 |
+
|
| 697 |
+
"""
|
| 698 |
+
if name.endswith("_floor"):
|
| 699 |
+
parts = name.split("_")
|
| 700 |
+
return "_".join(parts[:-2] + ["floor"])
|
| 701 |
+
|
| 702 |
+
if "Factory" in name:
|
| 703 |
+
# Handle infinigen naming convention
|
| 704 |
+
prefix = name.split("Factory")[0]
|
| 705 |
+
suffix = f"_{name.split('_')[-1]}"
|
| 706 |
+
else:
|
| 707 |
+
prefix, suffix = name, ""
|
| 708 |
+
|
| 709 |
+
res = prefix.replace(" ", "_")
|
| 710 |
+
res = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", res)
|
| 711 |
+
res = res.lower()
|
| 712 |
+
res = re.sub(r"_+", "_", res).strip("_ ")
|
| 713 |
+
|
| 714 |
+
return f"{res}{suffix}"
|
| 715 |
+
|
| 716 |
+
    def add_instance(
        self,
        asset_path: str,
        instance_key: str,
        in_room: str | None = None,
        on_instance: str | None = None,
        rotation_rpy: tuple[float, float, float] = DEFAULT_ROTATION_RPY,
        n_max_attempt: int = DEFAULT_MAX_PLACEMENT_ATTEMPTS,
        place_strategy: Literal["top", "random"] = "random",
    ) -> list[float] | None:
        """Add a new instance to the scene with automatic placement.

        Args:
            asset_path: Path to the asset mesh file.
            instance_key: Unique key for the new instance.
            in_room: Optional room name to constrain placement.
            on_instance: Optional instance name to place on top of.
            rotation_rpy: Initial rotation in roll-pitch-yaw.
            n_max_attempt: Maximum placement attempts.
            place_strategy: Either "top" or "random".

        Returns:
            List [x, y, z] of the placed instance center, or None if failed.

        Raises:
            ValueError: If instance_key already exists or room/instance not found.

        """
        if instance_key in self.instances:
            raise ValueError(f"Instance key '{instance_key}' already exists.")

        # Resolve where placement is allowed and what must be avoided.
        room_poly = self._resolve_room_polygon(in_room)
        target_area, obstacles, base_z = self._resolve_placement_target(
            on_instance, room_poly, place_strategy
        )

        if target_area.is_empty:
            logger.error("Target area for placement is empty.")
            return None

        # Pre-rotate the asset so the sampled footprint matches its pose.
        mesh = trimesh.load(asset_path, force="mesh")
        mesh.apply_transform(
            trimesh.transformations.euler_matrix(*rotation_rpy, "sxyz")
        )

        # Build the footprint polygon and re-center it at the origin so
        # placement offsets are relative to the footprint centroid.
        verts = np.asarray(mesh.sample(self.mesh_sample_num))[:, :2]
        base_poly = points_to_polygon(verts)
        centroid = base_poly.centroid
        base_poly = translate(base_poly, xoff=-centroid.x, yoff=-centroid.y)

        placement = self._try_place_polygon(
            base_poly, target_area, obstacles, n_max_attempt
        )

        if placement is None:
            logger.error(
                f"Failed to place {asset_path} after {n_max_attempt} attempts."
            )
            return None

        x, y, candidate = placement
        self.instances[instance_key] = candidate
        # Rest the asset on the support surface: support height plus half
        # the mesh's vertical extent (assumes the mesh is vertically
        # centered about its origin — TODO confirm for all assets).
        final_z = base_z + mesh.extents[2] / 2
        self._update_internal_state()

        return [round(v, 4) for v in (x, y, final_z)]
|
| 782 |
+
|
| 783 |
+
def _resolve_room_polygon(self, in_room: str | None) -> Geometry | None:
|
| 784 |
+
"""Resolve room name to polygon.
|
| 785 |
+
|
| 786 |
+
Args:
|
| 787 |
+
in_room: Room name query string.
|
| 788 |
+
|
| 789 |
+
Returns:
|
| 790 |
+
Room polygon or None if not specified.
|
| 791 |
+
|
| 792 |
+
Raises:
|
| 793 |
+
ValueError: If room not found.
|
| 794 |
+
|
| 795 |
+
"""
|
| 796 |
+
if in_room is None:
|
| 797 |
+
return None
|
| 798 |
+
|
| 799 |
+
query_room = in_room.lower()
|
| 800 |
+
room_matches = [
|
| 801 |
+
k for k in self.rooms.keys() if query_room in k.lower()
|
| 802 |
+
]
|
| 803 |
+
|
| 804 |
+
if not room_matches:
|
| 805 |
+
raise ValueError(f"Room '{in_room}' not found.")
|
| 806 |
+
|
| 807 |
+
return unary_union([self.rooms[k] for k in room_matches])
|
| 808 |
+
|
| 809 |
+
def _resolve_placement_target(
|
| 810 |
+
self,
|
| 811 |
+
on_instance: str | None,
|
| 812 |
+
room_poly: Geometry | None,
|
| 813 |
+
place_strategy: Literal["top", "random"],
|
| 814 |
+
) -> tuple[Geometry, Geometry, float]:
|
| 815 |
+
"""Resolve the target placement area and obstacles.
|
| 816 |
+
|
| 817 |
+
Args:
|
| 818 |
+
on_instance: Instance name to place on.
|
| 819 |
+
room_poly: Room polygon constraint.
|
| 820 |
+
place_strategy: Placement strategy.
|
| 821 |
+
|
| 822 |
+
Returns:
|
| 823 |
+
Tuple of (target_area, obstacles, base_z_height).
|
| 824 |
+
|
| 825 |
+
Raises:
|
| 826 |
+
ValueError: If on_instance not found.
|
| 827 |
+
|
| 828 |
+
"""
|
| 829 |
+
if on_instance is None:
|
| 830 |
+
if room_poly is not None:
|
| 831 |
+
return room_poly, self.occ_area, 0.0
|
| 832 |
+
return self.floor_union, self.occ_area, 0.0
|
| 833 |
+
|
| 834 |
+
query_obj = on_instance.lower()
|
| 835 |
+
possible_matches = [
|
| 836 |
+
k
|
| 837 |
+
for k in self.instances.keys()
|
| 838 |
+
if query_obj in k.lower() and k != "walls"
|
| 839 |
+
]
|
| 840 |
+
|
| 841 |
+
if room_poly is not None:
|
| 842 |
+
possible_matches = [
|
| 843 |
+
k
|
| 844 |
+
for k in possible_matches
|
| 845 |
+
if self.instances[k].intersects(room_poly)
|
| 846 |
+
]
|
| 847 |
+
|
| 848 |
+
if not possible_matches:
|
| 849 |
+
location_msg = f" in room '{on_instance}'" if room_poly else ""
|
| 850 |
+
raise ValueError(
|
| 851 |
+
f"No instance matching '{on_instance}' found{location_msg}."
|
| 852 |
+
)
|
| 853 |
+
|
| 854 |
+
if place_strategy == "random":
|
| 855 |
+
target_parent_key = random.choice(possible_matches)
|
| 856 |
+
else:
|
| 857 |
+
target_parent_key = possible_matches[0]
|
| 858 |
+
|
| 859 |
+
if len(possible_matches) > 1:
|
| 860 |
+
logger.warning(
|
| 861 |
+
f"Multiple matches for '{on_instance}': {possible_matches}. "
|
| 862 |
+
f"Using '{target_parent_key}'."
|
| 863 |
+
)
|
| 864 |
+
|
| 865 |
+
meta = self.instance_meta[target_parent_key]
|
| 866 |
+
parent_mesh = trimesh.load(meta["mesh_path"], force="mesh")
|
| 867 |
+
matrix = np.eye(4)
|
| 868 |
+
matrix[:3, :3] = R.from_euler("xyz", meta["rpy"]).as_matrix()
|
| 869 |
+
matrix[:3, 3] = meta["xyz"]
|
| 870 |
+
parent_mesh.apply_transform(matrix)
|
| 871 |
+
|
| 872 |
+
best_z, surface_poly = get_actionable_surface(
|
| 873 |
+
parent_mesh, place_strategy=place_strategy
|
| 874 |
+
)
|
| 875 |
+
obstacles = self.occ_area.difference(self.instances[target_parent_key])
|
| 876 |
+
|
| 877 |
+
logger.info(f"Placing on '{target_parent_key}' (Z={best_z:.3f})")
|
| 878 |
+
|
| 879 |
+
return surface_poly, obstacles, best_z
|
| 880 |
+
|
| 881 |
+
def _try_place_polygon(
|
| 882 |
+
self,
|
| 883 |
+
base_poly: Polygon,
|
| 884 |
+
target_area: Geometry,
|
| 885 |
+
obstacles: Geometry,
|
| 886 |
+
n_max_attempt: int,
|
| 887 |
+
) -> tuple[float, float, Polygon] | None:
|
| 888 |
+
"""Try to place polygon in target area avoiding obstacles.
|
| 889 |
+
|
| 890 |
+
Args:
|
| 891 |
+
base_poly: Polygon to place (centered at origin).
|
| 892 |
+
target_area: Area where placement is allowed.
|
| 893 |
+
obstacles: Areas to avoid.
|
| 894 |
+
n_max_attempt: Maximum attempts.
|
| 895 |
+
|
| 896 |
+
Returns:
|
| 897 |
+
Tuple of (x, y, placed_polygon) or None if failed.
|
| 898 |
+
|
| 899 |
+
"""
|
| 900 |
+
minx, miny, maxx, maxy = target_area.bounds
|
| 901 |
+
|
| 902 |
+
for _ in range(n_max_attempt):
|
| 903 |
+
x = np.random.uniform(minx, maxx)
|
| 904 |
+
y = np.random.uniform(miny, maxy)
|
| 905 |
+
candidate = translate(base_poly, xoff=x, yoff=y)
|
| 906 |
+
|
| 907 |
+
if target_area.contains(candidate) and not candidate.intersects(
|
| 908 |
+
obstacles
|
| 909 |
+
):
|
| 910 |
+
return x, y, candidate
|
| 911 |
+
|
| 912 |
+
return None
|
| 913 |
+
|
| 914 |
+
    def update_urdf_info(
        self,
        output_path: str,
        instance_key: str,
        visual_mesh_path: str,
        collision_mesh_path: str | None = None,
        trans_xyz: tuple[float, float, float] = (0, 0, 0),
        rot_rpy: tuple[float, float, float] = DEFAULT_ROTATION_RPY,
        joint_type: str = "fixed",
    ) -> None:
        """Add a new link to the URDF tree and save.

        Copies the asset's directory next to the URDF, then appends a
        <link> element plus a joint anchoring it to the "base" link.

        Args:
            output_path: Path to save the updated URDF.
            instance_key: Name for the new link.
            visual_mesh_path: Path to the visual mesh file.
            collision_mesh_path: Optional path to collision mesh.
            trans_xyz: Translation (x, y, z).
            rot_rpy: Rotation (roll, pitch, yaw).
            joint_type: Type of joint (e.g., "fixed").

        """
        # collect() must have parsed a URDF first.
        if self._root is None:
            return

        logger.info(f"Updating URDF for instance '{instance_key}'.")
        urdf_dir = os.path.dirname(self.urdf_path)

        # Copy mesh files (entire asset directory, so textures/MTLs come too)
        copytree(
            os.path.dirname(visual_mesh_path),
            f"{urdf_dir}/{instance_key}",
            dirs_exist_ok=True,
        )
        visual_rel_path = (
            f"{instance_key}/{os.path.basename(visual_mesh_path)}"
        )

        collision_rel_path = None
        if collision_mesh_path is not None:
            copytree(
                os.path.dirname(collision_mesh_path),
                f"{urdf_dir}/{instance_key}",
                dirs_exist_ok=True,
            )
            collision_rel_path = (
                f"{instance_key}/{os.path.basename(collision_mesh_path)}"
            )

        # Create link element with visual (and optional collision) geometry
        link = ET.SubElement(self._root, "link", attrib={"name": instance_key})

        visual = ET.SubElement(link, "visual")
        v_geo = ET.SubElement(visual, "geometry")
        ET.SubElement(v_geo, "mesh", attrib={"filename": visual_rel_path})

        if collision_rel_path is not None:
            collision = ET.SubElement(link, "collision")
            c_geo = ET.SubElement(collision, "geometry")
            ET.SubElement(
                c_geo, "mesh", attrib={"filename": collision_rel_path}
            )

        # Create joint element anchoring the new link to the scene "base"
        joint_name = f"joint_{instance_key}"
        joint = ET.SubElement(
            self._root,
            "joint",
            attrib={"name": joint_name, "type": joint_type},
        )

        ET.SubElement(joint, "parent", attrib={"link": "base"})
        ET.SubElement(joint, "child", attrib={"link": instance_key})

        xyz_str = f"{trans_xyz[0]:.4f} {trans_xyz[1]:.4f} {trans_xyz[2]:.4f}"
        rpy_str = f"{rot_rpy[0]:.4f} {rot_rpy[1]:.4f} {rot_rpy[2]:.4f}"
        ET.SubElement(joint, "origin", attrib={"xyz": xyz_str, "rpy": rpy_str})

        self.save_urdf(output_path)
|
| 993 |
+
|
| 994 |
+
    def update_usd_info(
        self,
        usd_path: str,
        output_path: str,
        instance_key: str,
        visual_mesh_path: str,
        trans_xyz: list[float],
        rot_rpy: tuple[float, float, float] = DEFAULT_ROTATION_RPY,
    ) -> None:
        """Add a mesh instance to an existing USD file.

        Converts the OBJ asset to USDC via Blender, copies its textures,
        references the converted file from a new prim, and bakes in the
        requested pose.

        Args:
            usd_path: Path to the source USD file.
            output_path: Path to save the modified USD.
            instance_key: Prim path name for the new instance.
            visual_mesh_path: Path to the visual mesh (OBJ format).
            trans_xyz: Translation [x, y, z].
            rot_rpy: Rotation (roll, pitch, yaw).

        """
        # Heavy deps are imported lazily: bpy/pxr are only needed here.
        import bpy
        from pxr import Gf, Usd, UsdGeom

        prim_path = f"/{instance_key}"
        out_dir = os.path.dirname(output_path)
        target_dir = os.path.join(out_dir, instance_key)
        os.makedirs(target_dir, exist_ok=True)

        mesh_filename = os.path.basename(visual_mesh_path)
        usdc_filename = os.path.splitext(mesh_filename)[0] + ".usdc"
        target_usdc_path = os.path.join(target_dir, usdc_filename)

        logger.info(
            f"Converting with Blender (bpy): "
            f"{visual_mesh_path} -> {target_usdc_path}"
        )
        # Start from an empty Blender scene so only this asset is exported.
        bpy.ops.wm.read_factory_settings(use_empty=True)
        bpy.ops.wm.obj_import(
            filepath=visual_mesh_path,
            forward_axis="Y",
            up_axis="Z",
        )
        bpy.ops.wm.usd_export(
            filepath=target_usdc_path,
            selected_objects_only=False,
        )

        # Copy texture files sitting next to the OBJ
        src_dir = os.path.dirname(visual_mesh_path)
        for f in os.listdir(src_dir):
            if f.lower().endswith((".png", ".jpg", ".jpeg", ".mtl")):
                copy2(os.path.join(src_dir, f), target_dir)

        # Reference is relative so the exported USD stays relocatable.
        final_rel_path = f"./{instance_key}/{usdc_filename}"

        # Update USD stage
        stage = Usd.Stage.Open(usd_path)
        mesh_prim = UsdGeom.Xform.Define(stage, prim_path)

        ref_prim = UsdGeom.Mesh.Define(stage, f"{prim_path}/Mesh")
        ref_prim.GetPrim().GetReferences().AddReference(final_rel_path)

        # Build transform matrix
        translation_mat = Gf.Matrix4d().SetTranslate(
            Gf.Vec3d(trans_xyz[0], trans_xyz[1], trans_xyz[2])
        )
        rx = Gf.Matrix4d().SetRotate(
            Gf.Rotation(Gf.Vec3d(1, 0, 0), np.degrees(rot_rpy[0]))
        )
        ry = Gf.Matrix4d().SetRotate(
            Gf.Rotation(Gf.Vec3d(0, 1, 0), np.degrees(rot_rpy[1]))
        )
        rz = Gf.Matrix4d().SetRotate(
            Gf.Rotation(Gf.Vec3d(0, 0, 1), np.degrees(rot_rpy[2]))
        )
        # NOTE(review): product order assumes Gf's row-vector convention
        # (X, then Y, then Z rotation, then translation) — confirm against
        # the URDF rpy convention used elsewhere in this class.
        rotation_mat = rx * ry * rz
        transform = rotation_mat * translation_mat
        mesh_prim.AddTransformOp().Set(transform)

        stage.GetRootLayer().Export(output_path)
        logger.info(f"Exported: {output_path}")
|
| 1075 |
+
|
| 1076 |
+
    def save_urdf(self, output_path: str) -> None:
        """Save the current URDF tree to file.

        Args:
            output_path: Path to save the URDF file.

        """
        # Nothing to save before collect() has parsed a URDF.
        if self._tree is None:
            return

        # ET.indent exists only on Python >= 3.9; pretty-print if available.
        if hasattr(ET, "indent"):
            ET.indent(self._tree, space=" ", level=0)

        self._tree.write(output_path, encoding="utf-8", xml_declaration=True)
        logger.info(f"Saved updated URDF to {output_path}")
|
| 1091 |
+
|
| 1092 |
+
|
| 1093 |
+
def entrypoint(cfg: Scene3DGenConfig) -> None:
    """Main entry point for floorplan visualization and scene manipulation.

    Args:
        cfg: Configuration object with all parameters.

    """
    # Initialize collector and parse URDF
    collector = UrdfSemanticInfoCollector(
        mesh_sample_num=cfg.mesh_sample_num,
        ignore_items=cfg.ignore_items,
    )
    collector.collect(cfg.urdf_path)

    # Add asset instance if specified
    center = None
    if cfg.asset_path is not None:
        center = collector.add_instance(
            asset_path=cfg.asset_path,
            instance_key=cfg.instance_key,
            in_room=cfg.in_room,
            on_instance=cfg.on_instance,
            rotation_rpy=cfg.rotation_rpy,
            n_max_attempt=cfg.max_placement_attempts,
            place_strategy=cfg.place_strategy,
        )

        if center is not None:
            logger.info(
                f"Placed '{cfg.instance_key}' at position: "
                f"({center[0]:.3f}, {center[1]:.3f}, {center[2]:.3f})"
            )

            # Update URDF if requested
            if cfg.update_urdf:
                # BUGFIX: str.replace(".urdf", ...) rewrote *every*
                # occurrence of the extension anywhere in the path;
                # splitext only touches the real suffix.
                urdf_root, urdf_ext = os.path.splitext(cfg.urdf_path)
                urdf_output = f"{urdf_root}_updated{urdf_ext}"
                asset_root, asset_ext = os.path.splitext(cfg.asset_path)
                collision_path = f"{asset_root}_collision{asset_ext}"

                # Use collision mesh only if it exists
                if not os.path.exists(collision_path):
                    collision_path = None

                collector.update_urdf_info(
                    output_path=urdf_output,
                    instance_key=cfg.instance_key,
                    visual_mesh_path=cfg.asset_path,
                    collision_mesh_path=collision_path,
                    trans_xyz=tuple(center),
                    rot_rpy=cfg.rotation_rpy,
                    joint_type="fixed",
                )

            # Update USD if requested and path is provided
            if cfg.update_usd and cfg.usd_path is not None:
                usd_root, usd_ext = os.path.splitext(cfg.usd_path)
                usd_output = f"{usd_root}_updated{usd_ext}"
                collector.update_usd_info(
                    usd_path=cfg.usd_path,
                    output_path=usd_output,
                    instance_key=cfg.instance_key,
                    visual_mesh_path=cfg.asset_path,
                    trans_xyz=center,
                    rot_rpy=cfg.rotation_rpy,
                )
        else:
            logger.warning(
                f"Failed to place '{cfg.instance_key}' in the scene."
            )

    # Generate floorplan visualization
    FloorplanVisualizer.plot(
        collector.rooms,
        collector.footprints,
        collector.occ_area,
        cfg.output_path,
    )
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
if __name__ == "__main__":
    # Parse CLI flags into a Scene3DGenConfig via tyro, then run.
    config = tyro.cli(Scene3DGenConfig)
    entrypoint(config)
|
| 1175 |
+
|
| 1176 |
+
"""
|
| 1177 |
+
python embodied_gen/scripts/room_gen/visualize_floorplan.py \
|
| 1178 |
+
--urdf_path outputs/rooms/Office_seed68661/urdf/export_scene/scene.urdf \
|
| 1179 |
+
--output_path outputs/rooms/Office_seed68661/floorplan.png \
|
| 1180 |
+
--usd_path outputs/rooms_v2/Kitchen_seed0/usd/export_scene/export_scene.usdc \
|
| 1181 |
+
--asset_path /home/users/xinjie.wang/xinjie/asset3d-gen/outputs/semantics_tasks/task_0059/asset3d/red_apple/result/mesh/red_apple.obj \
|
| 1182 |
+
--instance_key red_apple \
|
| 1183 |
+
--in_room kitchen \
|
| 1184 |
+
--on_instance oven \
|
| 1185 |
+
--place_strategy top
|
| 1186 |
+
"""
|
embodied_gen/trainer/pono2mesh_trainer.py
CHANGED
|
@@ -15,7 +15,7 @@
|
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
|
| 18 |
-
from embodied_gen.utils.
|
| 19 |
|
| 20 |
monkey_patch_pano2room()
|
| 21 |
|
|
|
|
| 15 |
# permissions and limitations under the License.
|
| 16 |
|
| 17 |
|
| 18 |
+
from embodied_gen.utils.monkey_patch.pano2room import monkey_patch_pano2room
|
| 19 |
|
| 20 |
monkey_patch_pano2room()
|
| 21 |
|
embodied_gen/utils/gpt_clients.py
CHANGED
|
@@ -42,7 +42,8 @@ __all__ = [
|
|
| 42 |
"GPTclient",
|
| 43 |
]
|
| 44 |
|
| 45 |
-
|
|
|
|
| 46 |
|
| 47 |
|
| 48 |
class GPTclient:
|
|
|
|
| 42 |
"GPTclient",
|
| 43 |
]
|
| 44 |
|
| 45 |
+
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 46 |
+
CONFIG_FILE = os.path.join(_CURRENT_DIR, "gpt_config.yaml")
|
| 47 |
|
| 48 |
|
| 49 |
class GPTclient:
|
embodied_gen/utils/inference.py
CHANGED
|
@@ -1,4 +1,21 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
monkey_path_trellis()
|
| 4 |
import random
|
|
@@ -21,6 +38,7 @@ def image3d_model_infer(
|
|
| 21 |
seed: int = None,
|
| 22 |
**kwargs: dict,
|
| 23 |
) -> dict[str, any]:
|
|
|
|
| 24 |
if isinstance(pipe, TrellisImageTo3DPipeline):
|
| 25 |
pipe.cuda()
|
| 26 |
seg_image = trellis_preprocess(seg_image)
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
from embodied_gen.utils.monkey_patch.trellis import monkey_path_trellis
|
| 19 |
|
| 20 |
monkey_path_trellis()
|
| 21 |
import random
|
|
|
|
| 38 |
seed: int = None,
|
| 39 |
**kwargs: dict,
|
| 40 |
) -> dict[str, any]:
|
| 41 |
+
"""Execute 3D generation using Trellis or SAM3D pipeline on input image."""
|
| 42 |
if isinstance(pipe, TrellisImageTo3DPipeline):
|
| 43 |
pipe.cuda()
|
| 44 |
seg_image = trellis_preprocess(seg_image)
|
embodied_gen/utils/monkey_patch/infinigen.py
ADDED
|
@@ -0,0 +1,781 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _infinigen_path():
|
| 25 |
+
current_file = os.path.abspath(__file__)
|
| 26 |
+
current_dir = os.path.dirname(current_file)
|
| 27 |
+
return os.path.abspath(
|
| 28 |
+
os.path.join(current_dir, "../../..", "thirdparty", "infinigen")
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _ensure_infinigen_on_path():
    """Prepend the vendored Infinigen checkout to ``sys.path`` exactly once."""
    infinigen_dir = _infinigen_path()
    if infinigen_dir in sys.path:
        return
    sys.path.insert(0, infinigen_dir)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def patch_material_assignments():
    """Replace ceramic.tile with ceramic.Tile in utility_floor assignments."""
    _ensure_infinigen_on_path()
    from infinigen.assets.composition import material_assignments
    from infinigen.assets.materials import ceramic

    # Rebuild utility_floor so the capitalized ceramic.Tile class is used
    # instead of the lowercase ceramic.tile module attribute.
    floor_materials = (ceramic.Concrete, ceramic.Plaster, ceramic.Tile)
    material_assignments.utility_floor = [
        (material, 1.0) for material in floor_materials
    ]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def patch_concrete():
    """Filter Concrete.generate kwargs to supported keys."""
    _ensure_infinigen_on_path()
    from infinigen.assets.materials.ceramic import concrete
    from infinigen.core import surface

    shader = concrete.shader_concrete
    # Keyword names actually accepted by Concrete.shader_concrete.
    accepted = frozenset(
        (
            'scale',
            'base_color_hsv',
            'seed',
            'roughness',
            'crack_amount',
            'crack_scale',
            'snake_crack',
        )
    )

    def patched_generate(self, **kwargs):
        # Drop any keyword the shader does not understand before delegating,
        # so upstream callers passing extra options no longer crash.
        kept = {key: val for key, val in kwargs.items() if key in accepted}
        return surface.shaderfunc_to_material(shader, **kept)

    # Patch both entry points so direct calls and generate() behave alike.
    concrete.Concrete.generate = patched_generate
    concrete.Concrete.__call__ = patched_generate
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def patch_room_constants():
    """Add Office to RoomConstants.home_room_types."""
    _ensure_infinigen_on_path()
    from infinigen.core import tags as t
    from infinigen.core.constraints.constraint_language.constants import (
        RoomConstants,
    )

    original_getter = RoomConstants.home_room_types.fget

    def patched_home_room_types(self):
        # Union the stock set with Office so office layouts are accepted.
        return original_getter(self) | {t.Semantics.Office}

    RoomConstants.home_room_types = property(patched_home_room_types)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def patch_doors_base_simple():
    """Override BaseDoorFactory init to customize door dimensions and handles."""
    _ensure_infinigen_on_path()
    from infinigen.assets import colors
    from infinigen.assets.composition import material_assignments
    from infinigen.assets.objects.elements.doors.base import BaseDoorFactory
    from infinigen.core.constraints.constraint_language.constants import (
        RoomConstants,
    )
    # NOTE(review): AssetFactory appears unused below — confirm before removing.
    from infinigen.core.placement.factory import AssetFactory
    from infinigen.core.util.math import FixedSeed
    from infinigen.core.util.random import weighted_sample
    from numpy.random import uniform

    _orig_init = BaseDoorFactory.__init__

    def patched_init(self, factory_seed, coarse=False, constants=None):
        # Run the stock initializer first, then overwrite selected attributes
        # deterministically under the factory seed.
        _orig_init(self, factory_seed, coarse=coarse, constants=constants)
        with FixedSeed(self.factory_seed):
            if constants is None:
                constants = RoomConstants()
            # Door leaf slightly narrower than the wall opening.
            self.width = constants.door_width - 0.02
            self.door_frame_style = np.random.choice(
                ["single_column", "full_frame_square", "full_frame_dome"]
            )
            self.door_frame_width = 0.02
            handle_types = ["knob", "lever", "pull", "none"]
            # Bar handles are only offered on non-dome frames.
            if self.door_frame_style != "full_frame_dome":
                handle_types.append("bar")
            # Full frames eat into the leaf size on both axes.
            if self.door_frame_style != "single_column":
                self.width += -0.02
                self.height += -0.04
            self.handle_type = np.random.choice(handle_types)
            if self.handle_type == "bar":
                self.surface = weighted_sample(material_assignments.metals)()
            if self.handle_type == "bar":
                # Bar geometry is sampled relative to the final leaf size.
                self.handle_info_dict = {
                    "handle_type": self.handle_type,
                    "bar_length": uniform(0.7, 0.9) * self.width,
                    "bar_thickness": uniform(0.025, 0.045) * self.height,
                    "bar_aspect_ratio": uniform(0.4, 0.6),
                    "bar_height_ratio": uniform(0.7, 0.9),
                    "bar_length_ratio": uniform(0.5, 0.8),
                    "bar_end_length_ratio": uniform(0.1, 0.15),
                    "bar_end_height_ratio": uniform(1.8, 3.0),
                    "bar_overall_z_offset": -uniform(0.0, 0.1) * self.height,
                    "shader": weighted_sample(material_assignments.metals)(),
                    "color": colors.hsv2rgba(colors.metal_natural_hsv()),
                }
            else:
                self.handle_info_dict = {"handle_type": self.handle_type}
            # Map handle type to the articulation joint used at export time.
            if self.handle_type in ["knob", "lever"]:
                self.handle_joint = "hinge"
            elif self.handle_type == "bar":
                self.handle_joint = "slide"
            elif self.handle_type == "pull":
                self.handle_joint = "rigid"
            else:
                self.handle_joint = "none"

    BaseDoorFactory.__init__ = patched_init
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def patch_kitchen_cabinet():
    """Add kitchen_space_bottom support to kitchen cabinet factories."""
    _ensure_infinigen_on_path()
    from infinigen.assets.objects.shelves.kitchen_cabinet import (
        KitchenCabinetBaseFactory,
        KitchenCabinetFactory,
    )
    from numpy.random import uniform

    _orig_base_init = KitchenCabinetBaseFactory.__init__

    def patched_base_init(
        self,
        factory_seed,
        params=None,
        coarse=False,
        kitchen_space_bottom=False,
    ):
        # Avoid a shared mutable default by normalizing params here.
        if params is None:
            params = {}
        _orig_base_init(self, factory_seed, params=params, coarse=coarse)
        # Flag consumed by sample_params: force one full-width cabinet cell.
        self.bottom_mid = kitchen_space_bottom

    KitchenCabinetBaseFactory.__init__ = patched_base_init

    def patched_factory_init(
        self,
        factory_seed,
        params=None,
        coarse=False,
        dimensions=None,
        drawer_only=False,
        kitchen_space_bottom=False,
    ):
        if params is None:
            params = {}
        # Stash requested dimensions before the base init (which may sample).
        self.dimensions = dimensions
        KitchenCabinetBaseFactory.__init__(
            self,
            factory_seed,
            params=params,
            coarse=coarse,
            kitchen_space_bottom=kitchen_space_bottom,
        )
        self.drawer_only = drawer_only

    KitchenCabinetFactory.__init__ = patched_factory_init

    _orig_sample_params = KitchenCabinetFactory.sample_params

    def patched_sample_params(self):
        params = dict()
        if self.dimensions is None:
            # No explicit size given: sample depth/width/height once and
            # cache on the instance for subsequent calls.
            dimensions = (
                uniform(0.25, 0.35),
                uniform(0.5, 1.0),
                uniform(0.5, 1.3),
            )
            self.dimensions = dimensions
        else:
            dimensions = self.dimensions
        params["Dimensions"] = dimensions
        # Copy frame_params logic from original
        params["shelf_depth"] = params["Dimensions"][0] - 0.01
        # Split the usable height (minus 0.06 base) into ~0.3 m shelf cells.
        num_h = int((params["Dimensions"][2] - 0.06) / 0.3)
        params["shelf_cell_height"] = [
            (params["Dimensions"][2] - 0.06) / num_h for _ in range(num_h)
        ]
        params["side_board_thickness"] = 0.02
        params["division_board_thickness"] = 0.02
        params["bottom_board_height"] = 0.06
        self.frame_params = params
        # Partition the width into randomly sized cells (~0.45 m target).
        n_cells = max(int(params["Dimensions"][1] / 0.45), 1)
        intervals = np.random.uniform(0.55, 1.0, size=(n_cells,))
        intervals = intervals / intervals.sum() * params["Dimensions"][1]
        self.cabinet_widths = intervals.tolist()
        # kitchen_space_bottom collapses everything into one full-width cell.
        if getattr(self, "bottom_mid", False):
            self.cabinet_widths = [params["Dimensions"][1]]

    KitchenCabinetFactory.sample_params = patched_sample_params
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def patch_kitchen_space():
    """Customize kitchen space/island creation with sink and layout tweaks."""
    _ensure_infinigen_on_path()
    from infinigen.assets.objects.shelves.kitchen_cabinet import (
        KitchenCabinetFactory,
    )

    # Need to import geometry_nodes_add_cabinet_top and nodegroup_tag_cube from same module
    from infinigen.assets.objects.shelves.kitchen_space import (
        KitchenIslandFactory,
        KitchenSpaceFactory,
        geometry_nodes_add_cabinet_top,
        nodegroup_tag_cube,
    )
    from infinigen.assets.objects.table_decorations import SinkFactory
    from infinigen.assets.objects.wall_decorations.range_hood import (
        RangeHoodFactory,
    )
    # NOTE(review): new_bbox appears unused below — confirm before removing.
    from infinigen.assets.utils.object import new_bbox
    from infinigen.core import surface, tagging
    from infinigen.core.util import blender as butil
    from infinigen.core.util.math import FixedSeed
    from mathutils import Vector
    from numpy.random import choice, uniform

    _orig_ks_init = KitchenSpaceFactory.__init__

    def patched_ks_init(
        self,
        factory_seed,
        coarse=False,
        dimensions=None,
        island=False,
        has_sink=False,
    ):
        # Call the grandparent (AssetFactory-level) init directly, skipping
        # the stock KitchenSpaceFactory init we are replacing.
        KitchenSpaceFactory.__bases__[0].__init__(
            self, factory_seed, coarse=coarse
        )
        with FixedSeed(factory_seed):
            if dimensions is None:
                dimensions = Vector(
                    (uniform(0.7, 1), uniform(1.7, 5), uniform(2.3, 2.5))
                )
            self.island = island
            if self.island:
                # Islands are deeper but shorter along y than wall units.
                dimensions.x *= uniform(1.5, 2)
                dimensions.y = uniform(1, 2)
            self.dimensions = dimensions
            self.params = self.sample_parameters(dimensions)
            self.has_sink = has_sink

    KitchenSpaceFactory.__init__ = patched_ks_init

    _orig_create_asset = KitchenSpaceFactory.create_asset

    def patched_create_asset(self, **params):
        x, y, z = self.dimensions
        parts = []
        cabinet_bottom_height = self.cabinet_bottom_height
        cabinet_top_height = self.cabinet_top_height
        # Layout: left cabinet | mid section (sink/stove) | right cabinet.
        mid_width = uniform(1.0, 1.3)
        other_width = (y - mid_width) / 2.0
        offset_bm = 0.04
        offset_tm = 0.08
        offset = 0.04
        # Width-dependent gap tweaks so cabinet cells line up cleanly;
        # near-threshold widths are shifted into the mid section instead.
        if other_width >= 0.98:
            offset = 0.08
        elif 0.98 > other_width >= 0.9:
            other_width += -0.04
            mid_width += 0.08
        if other_width >= 1.47:
            offset = 0.12
        elif 1.47 > other_width >= 1.35:
            other_width += -0.04
            mid_width += 0.08
        if other_width >= 1.96:
            offset = 0.16
        elif 1.96 > other_width >= 1.8:
            other_width += -0.04
            mid_width += 0.08

        if self.island and other_width <= 0.3:
            # Narrow island: a single bottom cabinet spanning the full width.
            num_cells = False
            offset = 0.08
            if getattr(self, "has_sink", False) or y < 1.35:
                num_cells = True
                offset = 0.04
            island_factory = KitchenCabinetFactory(
                self.factory_seed,
                dimensions=(x, y - offset, cabinet_bottom_height),
                drawer_only=True,
                kitchen_space_bottom=num_cells,
            )
            cabinet_bottom = island_factory(i=0)
        else:
            # Standard layout: left + right cabinets plus a mid cabinet.
            cabinet_bottom_factory = KitchenCabinetFactory(
                self.factory_seed,
                dimensions=(x, other_width - offset, cabinet_bottom_height),
                drawer_only=True,
            )
            cabinet_bottom_left = cabinet_bottom_factory(i=0)
            cabinet_bottom_right = cabinet_bottom_factory(i=1)
            cabinet_bottom_left.location = (0.0, 0.0, 0.0)
            cabinet_bottom_right.location = (0.0, y - other_width, 0.0)
            cabinet_bottom_mid_factory = KitchenCabinetFactory(
                self.factory_seed,
                dimensions=(x, mid_width - offset_bm, cabinet_bottom_height),
                drawer_only=True,
                kitchen_space_bottom=True,
            )
            bottom_mid = cabinet_bottom_mid_factory(i=0)
            bottom_mid.location = (0.0, y - other_width - mid_width, 0.0)
            cabinet_bottom = butil.join_objects(
                [cabinet_bottom_left, cabinet_bottom_right, bottom_mid]
            )
        parts.append(cabinet_bottom)
        # Add the countertop on top of the joined bottom cabinets.
        surface.add_geomod(
            cabinet_bottom, geometry_nodes_add_cabinet_top, apply=True
        )

        if getattr(self, "has_sink", False):
            sink_factory = SinkFactory(
                factory_seed=self.factory_seed,
                dimensions=[
                    mid_width * 0.7,
                    min(x * 0.7, 0.4),
                    cabinet_bottom_height * 0.3,
                ],
            )
            sink = sink_factory(i=0)
            # Sink is centered along y and embedded in the countertop.
            sink.location = (
                (x / 2.0) - 0.3,
                y / 2.0,
                cabinet_bottom_height * 0.7 + 0.12,
            )
            sink.parent = cabinet_bottom

        if not self.island:
            # Wall units get upper cabinets; islands do not.
            cabinet_top_factory = KitchenCabinetFactory(
                self.factory_seed,
                dimensions=(x / 2.0, other_width - offset, cabinet_top_height),
                drawer_only=False,
            )
            cabinet_top_left = cabinet_top_factory(i=0)
            cabinet_top_right = cabinet_top_factory(i=1)
            cabinet_top_left.location = (-x / 4.0, 0.0, z - cabinet_top_height)
            cabinet_top_right.location = (
                -x / 4.0,
                y - other_width,
                z - cabinet_top_height,
            )
            # Only "cabinet" is currently enabled; "range_hood" branch kept
            # for potential re-enablement.
            mid_style = choice(["cabinet"])
            if mid_style == "range_hood":
                range_hood_factory = RangeHoodFactory(
                    self.factory_seed,
                    dimensions=(
                        x * 0.66,
                        mid_width + 0.15,
                        cabinet_top_height,
                    ),
                )
                top_mid = range_hood_factory(i=0)
                top_mid.location = (
                    -x * 0.5,
                    y / 2.0,
                    z - cabinet_top_height + 0.05,
                )
            elif mid_style == "cabinet":
                cabinet_top_mid_factory = KitchenCabinetFactory(
                    self.factory_seed,
                    dimensions=(
                        x / 2.0,
                        mid_width - offset_tm,
                        cabinet_top_height,
                    ),
                    drawer_only=False,
                )
                top_mid = cabinet_top_mid_factory(i=0)
                top_mid.location = (
                    -x / 4.0,
                    (y / 2.0) - (mid_width / 2.0),
                    z - cabinet_top_height,
                )
            else:
                raise NotImplementedError
            parts += [cabinet_top_left, cabinet_top_right, top_mid]

        kitchen_space = butil.join_objects(parts)
        if not self.island:
            # Normalize the joined object to the requested overall size.
            kitchen_space.dimensions = self.dimensions
            butil.apply_transform(kitchen_space)
        tagging.tag_system.relabel_obj(kitchen_space)
        return kitchen_space

    KitchenSpaceFactory.create_asset = patched_create_asset

    def patched_island_init(self, factory_seed):
        # Islands are kitchen spaces with island=True and no sink.
        KitchenSpaceFactory.__init__(
            self, factory_seed=factory_seed, island=True, has_sink=False
        )

    KitchenIslandFactory.__init__ = patched_island_init
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def patch_sink():
    """Simplify SinkFactory.sample_parameters with fixed sampling ranges."""
    _ensure_infinigen_on_path()
    from infinigen.assets.objects.table_decorations.sink import SinkFactory
    from numpy.random import uniform as U

    def patched_sample_parameters(
        dimensions, upper_height, use_default=False, open=False
    ):
        # Either take the caller-provided (width, depth, upper_height)
        # triple or sample one from the fixed ranges below.
        if dimensions:
            width, depth, upper_height = dimensions
        else:
            width = U(0.4, 1.0)
            depth = U(0.4, 0.5)
            upper_height = U(0.2, 0.4)
        # Remaining parameters are sampled inline; dict-literal evaluation
        # order keeps the RNG call sequence identical to the explicit form.
        return {
            "Width": width,
            "Depth": depth,
            "Curvature": U(1.0, 1.0),
            "Upper Height": upper_height,
            "Lower Height": U(0.00, 0.01),
            "HoleRadius": U(0.02, 0.05),
            "Margin": U(0.02, 0.05),
            "WaterTapMargin": U(0.1, 0.12),
            "ProtrudeAboveCounter": U(0.01, 0.025),
        }

    SinkFactory.sample_parameters = staticmethod(patched_sample_parameters)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def patch_generate_indoors():
    """Force populate_doors to use all_open=True by default."""
    _ensure_infinigen_on_path()
    from infinigen.core.constraints.example_solver.room import (
        decorate as room_dec,
    )

    wrapped = room_dec.populate_doors

    def patched_populate_doors(
        placeholders,
        constants,
        n_doors=3,
        door_chance=1,
        casing_chance=0.0,
        all_open=False,
        **kwargs,
    ):
        # Ignore the caller-supplied all_open flag: doors are always open.
        return wrapped(
            placeholders,
            constants,
            n_doors=n_doors,
            door_chance=door_chance,
            casing_chance=casing_chance,
            all_open=True,
            **kwargs,
        )

    room_dec.populate_doors = patched_populate_doors
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
def patch_room_types():
    """Include Office in util.room_types."""
    _ensure_infinigen_on_path()
    from infinigen.core import tags as t
    from infinigen_examples.constraints import util as cu

    # Register Office as a recognized room type in the example constraints.
    cu.room_types.add(t.Semantics.Office)
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
def patch_home_constraints():
    """Add office-only room constraints and desk/chair furniture rules."""
    _ensure_infinigen_on_path()
    from collections import OrderedDict

    import gin
    from infinigen.assets.objects import seating, shelves
    from infinigen.core.constraints import constraint_language as cl
    from infinigen.core.constraints.constraint_language.constants import (
        RoomConstants,
    )
    from infinigen.core.tags import Semantics
    from infinigen_examples.constraints import home as home_module
    from infinigen_examples.constraints import util as cu

    # Allow re-binding the gin-configurable below outside normal parse flow.
    gin.enter_interactive_mode()
    _orig_home_room_constraints = home_module.home_room_constraints

    def _office_room_constraints():
        # Build a constraint problem describing exactly one Office room.
        constraints = OrderedDict()
        score_terms = OrderedDict()
        constants = RoomConstants(
            fixed_contour=False, room_type={Semantics.Office}
        )
        rooms = cl.scene()[Semantics.RoomContour]
        # Exactly one Office reachable from the root of the room graph.
        constraints["node_gen"] = rooms[Semantics.Root].all(
            lambda r: rooms[Semantics.Office]
            .related_to(r, cl.Traverse())
            .count()
            .in_range(1, 1, mean=1)
        )
        # One Office total, any number of entrances, no staircase rooms.
        constraints["node"] = (
            rooms[Semantics.Office].count().in_range(1, 1, mean=1)
            * (rooms[Semantics.Entrance].count() >= 0)
            * (rooms[Semantics.StaircaseRoom].count() == 0)
        )
        all_rooms = cl.scene()[Semantics.RoomContour]
        rooms_filtered = all_rooms[-Semantics.Exterior][-Semantics.Staircase]
        # Penalize office areas far from ~25 m^2 (log-hinge squared).
        score_terms["room"] = (
            rooms_filtered[Semantics.Office]
            .sum(lambda r: (r.area() / 25).log().hinge(0, 0.4).pow(2))
            .minimize(weight=500.0)
        )
        return cl.Problem(
            constraints=constraints,
            score_terms=score_terms,
            constants=constants,
        )

    @gin.configurable(
        "home_room_constraints", module="infinigen_examples.constraints.home"
    )
    def patched_home_room_constraints(
        has_fewer_rooms=False, office_only=False
    ):
        # office_only switches to the single-office problem; otherwise
        # delegate to the stock constraints untouched.
        if office_only:
            return _office_room_constraints()
        return _orig_home_room_constraints(has_fewer_rooms=has_fewer_rooms)

    home_module.home_room_constraints = patched_home_room_constraints

    # --- home_furniture_constraints: Office room (1-2 desks, 1-2 chairs each) ---
    _orig_home_furniture_constraints = home_module.home_furniture_constraints

    def patched_home_furniture_constraints():
        # Start from the stock problem and append office-specific rules.
        problem = _orig_home_furniture_constraints()
        constraints = OrderedDict(problem.constraints)
        score_terms = OrderedDict(problem.score_terms)
        rooms = cl.scene()[{Semantics.Room, -Semantics.Object}]
        obj = cl.scene()[{Semantics.Object, -Semantics.Room}]
        furniture = obj[Semantics.Furniture].related_to(rooms, cu.on_floor)
        wallfurn = furniture.related_to(rooms, cu.against_wall)
        desks = wallfurn[shelves.SimpleDeskFactory]
        deskchair = furniture[seating.OfficeChairFactory].related_to(
            desks, cu.front_to_front
        )
        offices = rooms[Semantics.Office]
        # Each office has 1-2 wall desks.
        constraints["office_desks"] = offices.all(
            lambda r: desks.related_to(r).count().in_range(1, 2, mean=1.5)
        )
        # Each desk has 1-2 chairs facing it.
        constraints["office_desk_chairs"] = offices.all(
            lambda r: desks.related_to(r).all(
                lambda t: deskchair.related_to(r)
                .related_to(t)
                .count()
                .in_range(1, 2, mean=1.5)
            )
        )
        # Prefer accessible desks and chairs away from walls.
        score_terms["office_desks"] = offices.mean(
            lambda r: desks.related_to(r).mean(
                lambda d: (
                    cl.accessibility_cost(d, furniture.related_to(r)).minimize(
                        weight=3
                    )
                    + cl.accessibility_cost(d, r).minimize(weight=3)
                    + deskchair.related_to(r)
                    .distance(rooms, cu.walltags)
                    .maximize(weight=1)
                )
            )
        )
        return cl.Problem(constraints=constraints, score_terms=score_terms)

    home_module.home_furniture_constraints = patched_home_furniture_constraints
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def patch_floor_plan_solver():
    """Guard swap_room against layouts without swap targets."""
    _ensure_infinigen_on_path()
    from infinigen.core.constraints.example_solver.room import (
        solver as solver_module,
    )

    _orig_swap_room = solver_module.FloorPlanMoves.swap_room

    def patched_swap_room(self, state, k):
        # Collect rooms sharing a non-degenerate boundary with room k;
        # without any, the original move would fail deep inside.
        swap_targets = []
        for relation in state[k].relations:
            if relation.value.length > 0:
                swap_targets.append(relation.target_name)
        if not swap_targets:
            raise NotImplementedError(
                "No valid swap targets (e.g. single-room layout)"
            )
        return _orig_swap_room(self, state, k)

    solver_module.FloorPlanMoves.swap_room = patched_swap_room
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def patch_room_graph_root():
    """Allow single-room graphs to select a valid root without StaircaseRoom."""
    _ensure_infinigen_on_path()
    from infinigen.core.constraints.example_solver.room import (
        base as base_module,
    )
    from infinigen.core.tags import Semantics

    def patched_root(self):
        # With an explicit entrance, that room is the root.
        if self.entrance is not None:
            return self.names[self._entrance]
        # Fallback chain: staircase room, then Root-tagged room, then the
        # first non-exterior room.
        staircase_rooms = self[Semantics.StaircaseRoom]
        if staircase_rooms:
            return self.names[staircase_rooms[0]]
        root_rooms = self[Semantics.Root]
        if root_rooms:
            return self.names[root_rooms[0]]
        for name in self.names:
            if base_module.room_type(name) != Semantics.Exterior:
                return name
        raise IndexError(
            "Graph has no StaircaseRoom, Root, or interior room for root"
        )

    base_module.RoomGraph.root = property(patched_root)
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
def _make_run_main_impl():
    """Build the CLI entry point injected into generate_indoors as ``_run_main``."""

    def _run_main_impl():
        # Imports are deferred so Blender/infinigen load only when invoked.
        import argparse
        from pathlib import Path

        import infinigen_examples.generate_indoors as gi
        from infinigen.core import init

        parser = argparse.ArgumentParser()
        parser.add_argument("--output_folder", type=Path)
        parser.add_argument("--input_folder", type=Path, default=None)
        parser.add_argument("-s", "--seed", default=None)
        parser.add_argument(
            "-t",
            "--task",
            nargs="+",
            default=["coarse"],
            choices=[
                "coarse",
                "populate",
                "fine_terrain",
                "ground_truth",
                "render",
                "mesh_save",
                "export",
            ],
        )
        parser.add_argument("-g", "--configs", nargs="+", default=["base"])
        parser.add_argument("-p", "--overrides", nargs="+", default=[])
        parser.add_argument("--task_uniqname", type=str, default=None)
        parser.add_argument("-d", "--debug", type=str, nargs="*", default=None)

        # Parse through infinigen's helper (handles Blender's extra argv).
        args = init.parse_args_blender(parser)

        import logging

        logging.getLogger("infinigen").setLevel(logging.INFO)
        logging.getLogger("infinigen.core.nodes.node_wrangler").setLevel(
            logging.CRITICAL
        )
        if args.debug is not None:
            # -d with no names enables DEBUG for all infinigen loggers;
            # with names, only loggers matching a given suffix.
            for name in logging.root.manager.loggerDict:
                if not name.startswith("infinigen"):
                    continue
                if len(args.debug) == 0 or any(
                    name.endswith(x) for x in args.debug
                ):
                    logging.getLogger(name).setLevel(logging.DEBUG)

        gi.main(args)

    return _run_main_impl
|
| 731 |
+
|
| 732 |
+
|
| 733 |
+
def add_run_main_to_module(module):
    """Inject _run_main into generate_indoors module. Call after 'import infinigen_examples.generate_indoors as gi'."""
    # Attach a freshly built entry point; callers invoke module._run_main().
    module._run_main = _make_run_main_impl()
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def patch_generate_indoors_run_main():
    """Legacy: add _run_main if module already in sys.modules (e.g. when patch runs from generate_indoors top)."""
    module = sys.modules.get("infinigen_examples.generate_indoors")
    if module is None:
        return
    add_run_main_to_module(module)
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def monkey_patch_infinigen(
    *,
    material_assignments=True,
    concrete=True,
    room_constants=True,
    room_types=True,
    home_constraints=True,
    doors=True,
    kitchen_cabinet=True,
    kitchen_space=True,
    sink=True,
    generate_indoors=True,
):
    """Apply selected monkey patches to Infinigen."""
    # (flag, patch) pairs in the same order the patches were applied before.
    optional_patches = (
        (material_assignments, patch_material_assignments),
        (concrete, patch_concrete),
        (room_constants, patch_room_constants),
        (room_types, patch_room_types),
        (home_constraints, patch_home_constraints),
        (doors, patch_doors_base_simple),
        (kitchen_cabinet, patch_kitchen_cabinet),
        (kitchen_space, patch_kitchen_space),
        (sink, patch_sink),
        (generate_indoors, patch_generate_indoors),
    )
    for enabled, apply_patch in optional_patches:
        if enabled:
            apply_patch()
    # These safety patches are unconditional.
    patch_floor_plan_solver()
    patch_room_graph_root()
    patch_generate_indoors_run_main()
|
embodied_gen/utils/monkey_patch/maniskill.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def monkey_patch_maniskill():
    """Monkey patches ManiSkillScene to support sensor image retrieval and RGBA rendering."""
    from mani_skill.envs.scene import ManiSkillScene

    # NOTE(review): the annotation uses the builtin ``any``; typing.Any was
    # likely intended — harmless at runtime, confusing to type checkers.
    def get_sensor_images(
        self, obs: dict[str, any]
    ) -> dict[str, dict[str, torch.Tensor]]:
        """Retrieve images from all sensors based on observations."""
        sensor_data = dict()
        for name, sensor in self.sensors.items():
            sensor_data[name] = sensor.get_images(obs[name])
        return sensor_data

    def get_human_render_camera_images(
        self, camera_name: str = None, return_alpha: bool = False
    ) -> dict[str, torch.Tensor]:
        """Render images from human-view cameras, optionally generating alpha channel from segmentation."""

        def get_rgba_tensor(camera, return_alpha):
            # RGB first; optionally derive alpha from segmentation labels.
            color = camera.get_obs(
                rgb=True, depth=False, segmentation=False, position=False
            )["rgb"]
            if return_alpha:
                seg_labels = camera.get_obs(
                    rgb=False, depth=False, segmentation=True, position=False
                )["segmentation"]
                # Labels > 1 are treated as foreground (255); else 0.
                masks = np.where((seg_labels.cpu() > 1), 255, 0).astype(
                    np.uint8
                )
                masks = torch.tensor(masks).to(color.device)
                color = torch.concat([color, masks], dim=-1)

            return color

        image_data = dict()
        if self.gpu_sim_enabled:
            if self.parallel_in_single_scene:
                # All parallel envs share one scene: render every camera.
                for name, camera in self.human_render_cameras.items():
                    camera.camera._render_cameras[0].take_picture()
                    rgba = get_rgba_tensor(camera, return_alpha)
                    image_data[name] = rgba
            else:
                for name, camera in self.human_render_cameras.items():
                    if camera_name is not None and name != camera_name:
                        continue
                    assert camera.config.shader_config.shader_pack not in [
                        "rt",
                        "rt-fast",
                        "rt-med",
                    ], "ray tracing shaders do not work with parallel rendering"
                    camera.capture()
                    rgba = get_rgba_tensor(camera, return_alpha)
                    image_data[name] = rgba
        else:
            # CPU simulation: plain per-camera capture.
            for name, camera in self.human_render_cameras.items():
                if camera_name is not None and name != camera_name:
                    continue
                camera.capture()
                rgba = get_rgba_tensor(camera, return_alpha)
                image_data[name] = rgba

        return image_data

    ManiSkillScene.get_sensor_images = get_sensor_images
    ManiSkillScene.get_human_render_camera_images = (
        get_human_render_camera_images
    )
|
embodied_gen/utils/monkey_patch/pano2room.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import zipfile
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
from huggingface_hub import hf_hub_download
|
| 23 |
+
from omegaconf import OmegaConf
|
| 24 |
+
from PIL import Image
|
| 25 |
+
from torchvision import transforms
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def monkey_patch_pano2room():
    """Monkey patches pano2room components with custom initializers and model paths."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(this_dir, "../../.."))
    sys.path.append(os.path.join(this_dir, "../../../thirdparty/pano2room"))
    from thirdparty.pano2room.modules.geo_predictors.omnidata.omnidata_normal_predictor import (
        OmnidataNormalPredictor,
    )
    from thirdparty.pano2room.modules.geo_predictors.omnidata.omnidata_predictor import (
        OmnidataPredictor,
    )

    def _build_transform(img_size):
        # Preprocessing shared by both Omnidata predictors.
        return transforms.Compose(
            [
                transforms.Resize(img_size, interpolation=Image.BILINEAR),
                transforms.CenterCrop(img_size),
                transforms.Normalize(mean=0.5, std=0.5),
            ]
        )

    def patched_omni_depth_init(self):
        """Initialize Omnidata depth predictor with explicit model loading."""
        self.img_size = 384
        self.model = torch.hub.load(
            'alexsax/omnidata_models', 'depth_dpt_hybrid_384'
        )
        self.model.eval()
        self.trans_totensor = _build_transform(self.img_size)

    OmnidataPredictor.__init__ = patched_omni_depth_init

    def patched_omni_normal_init(self):
        """Initialize Omnidata normal predictor with explicit model loading."""
        self.img_size = 384
        self.model = torch.hub.load(
            'alexsax/omnidata_models', 'surface_normal_dpt_hybrid_384'
        )
        self.model.eval()
        self.trans_totensor = _build_transform(self.img_size)

    OmnidataNormalPredictor.__init__ = patched_omni_normal_init

    def patched_panojoint_init(self, save_path=None):
        """Initialize PanoJointPredictor using patched depth/normal predictors."""
        self.depth_predictor = OmnidataPredictor()
        self.normal_predictor = OmnidataNormalPredictor()
        self.save_path = save_path

    from modules.geo_predictors import PanoJointPredictor

    PanoJointPredictor.__init__ = patched_panojoint_init

    # NOTE: gsplat is used for rasterization instead of
    # depth_diff_gaussian_rasterization_min, so no rasterization-settings
    # patch is required here.

    # disable get_has_ddp_rank print in `BaseInpaintingTrainingModule`
    os.environ["NODE_RANK"] = "0"

    from thirdparty.pano2room.modules.inpainters.lama.saicinpainting.training.trainers import (
        load_checkpoint,
    )
    from thirdparty.pano2room.modules.inpainters.lama_inpainter import (
        LamaInpainter,
    )

    def patched_lama_inpaint_init(self):
        """Initialize LamaInpainter by downloading and setting up Big-Lama model."""
        zip_path = hf_hub_download(
            repo_id="smartywu/big-lama",
            filename="big-lama.zip",
            repo_type="model",
        )
        extract_dir = os.path.splitext(zip_path)[0]

        # Extract the archive once; later calls reuse the cached directory.
        if not os.path.exists(extract_dir):
            os.makedirs(extract_dir, exist_ok=True)
            with zipfile.ZipFile(zip_path, "r") as archive:
                archive.extractall(extract_dir)

        config_path = os.path.join(extract_dir, 'big-lama', 'config.yaml')
        checkpoint_path = os.path.join(
            extract_dir, 'big-lama/models/best.ckpt'
        )
        train_config = OmegaConf.load(config_path)
        train_config.training_model.predict_only = True
        train_config.visualizer.kind = 'noop'

        self.model = load_checkpoint(
            train_config, checkpoint_path, strict=False, map_location='cpu'
        )
        self.model.freeze()

    LamaInpainter.__init__ = patched_lama_inpaint_init

    from diffusers import StableDiffusionInpaintPipeline
    from thirdparty.pano2room.modules.inpainters.SDFT_inpainter import (
        SDFTInpainter,
    )

    def patched_sd_inpaint_init(self, subset_name=None):
        """Initialize SDFTInpainter with Stable Diffusion 2 Inpainting pipeline."""
        super(SDFTInpainter, self).__init__()
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            # "stabilityai/stable-diffusion-2-inpainting",
            "sd2-community/stable-diffusion-2-inpainting",
            torch_dtype=torch.float16,
        ).to("cuda")
        pipe.enable_model_cpu_offload()
        self.inpaint_pipe = pipe

    SDFTInpainter.__init__ = patched_sd_inpaint_init
|
embodied_gen/utils/monkey_patch/sam3d.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
from copy import deepcopy
|
| 20 |
+
from typing import Optional, Union
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import torch
|
| 24 |
+
from PIL import Image
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def monkey_patch_sam3d():
    """Monkey patches SAM3D inference pipelines with custom initialization and execution logic."""
    from embodied_gen.data.utils import model_device_ctx
    from embodied_gen.utils.log import logger

    os.environ["LIDRA_SKIP_INIT"] = "true"

    this_dir = os.path.dirname(os.path.abspath(__file__))
    sam3d_root = os.path.abspath(
        os.path.join(this_dir, "../../../thirdparty/sam3d")
    )
    if sam3d_root not in sys.path:
        sys.path.insert(0, sam3d_root)

    def patch_pointmap_infer_pipeline():
        """Patches InferencePipelinePointMap.run to handle pointmap generation and 3D structure sampling."""
        try:
            from sam3d_objects.pipeline.inference_pipeline_pointmap import (
                InferencePipelinePointMap,
            )
        except ImportError:
            logger.error(
                "[MonkeyPatch]: Could not import sam3d_objects directly. Check paths."
            )
            return

        def patch_run(
            self,
            image: Union[None, Image.Image, np.ndarray],
            mask: Union[None, Image.Image, np.ndarray] = None,
            seed: Optional[int] = None,
            stage1_only=False,
            with_mesh_postprocess=True,
            with_texture_baking=True,
            with_layout_postprocess=True,
            use_vertex_color=False,
            stage1_inference_steps=None,
            stage2_inference_steps=None,
            use_stage1_distillation=False,
            use_stage2_distillation=False,
            pointmap=None,
            decode_formats=None,
            estimate_plane=False,
        ) -> dict:
            """Execute the inference pipeline: process image/mask, generate layouts (SS), and decode 3D shapes (SLAT)."""
            image = self.merge_image_and_mask(image, mask)
            with self.device:
                pointmap_dict = self.compute_pointmap(image, pointmap)
                pointmap = pointmap_dict["pointmap"]
                # Down-sampled pointmap and its colors are carried through to
                # the returned dict for visualization.
                pts = type(self)._down_sample_img(pointmap)
                pts_colors = type(self)._down_sample_img(
                    pointmap_dict["pts_color"]
                )

                if estimate_plane:
                    return self.estimate_plane(pointmap_dict, image)

                ss_input_dict = self.preprocess_image(
                    image, self.ss_preprocessor, pointmap=pointmap
                )
                slat_input_dict = self.preprocess_image(
                    image, self.slat_preprocessor
                )
                if seed is not None:
                    torch.manual_seed(seed)

                # Stage 1: sparse-structure sampling. model_device_ctx moves
                # only the needed models onto the GPU for this stage.
                with model_device_ctx(
                    self.models["ss_generator"],
                    self.models["ss_decoder"],
                    self.condition_embedders["ss_condition_embedder"],
                ):
                    ss_return_dict = self.sample_sparse_structure(
                        ss_input_dict,
                        inference_steps=stage1_inference_steps,
                        use_distillation=use_stage1_distillation,
                    )

                # We could probably use the decoder from the models themselves
                pointmap_scale = ss_input_dict.get("pointmap_scale", None)
                pointmap_shift = ss_input_dict.get("pointmap_shift", None)
                ss_return_dict.update(
                    self.pose_decoder(
                        ss_return_dict,
                        scene_scale=pointmap_scale,
                        scene_shift=pointmap_shift,
                    )
                )

                ss_return_dict["scale"] = (
                    ss_return_dict["scale"]
                    * ss_return_dict["downsample_factor"]
                )

                if stage1_only:
                    logger.info("Finished!")
                    # Map voxel coordinates from the [0, 64) grid into the
                    # normalized [-0.5, 0.5) cube.
                    ss_return_dict["voxel"] = (
                        ss_return_dict["coords"][:, 1:] / 64 - 0.5
                    )
                    return {
                        **ss_return_dict,
                        "pointmap": pts.cpu().permute((1, 2, 0)),  # HxWx3
                        "pointmap_colors": pts_colors.cpu().permute(
                            (1, 2, 0)
                        ),  # HxWx3
                    }

                # Stage 2: structured-latent (SLAT) sampling on SS coords.
                coords = ss_return_dict["coords"]
                with model_device_ctx(
                    self.models["slat_generator"],
                    self.condition_embedders["slat_condition_embedder"],
                ):
                    slat = self.sample_slat(
                        slat_input_dict,
                        coords,
                        inference_steps=stage2_inference_steps,
                        use_distillation=use_stage2_distillation,
                    )

                with model_device_ctx(
                    self.models["slat_decoder_mesh"],
                    self.models["slat_decoder_gs"],
                    self.models["slat_decoder_gs_4"],
                ):
                    outputs = self.decode_slat(
                        slat,
                        (
                            self.decode_formats
                            if decode_formats is None
                            else decode_formats
                        ),
                    )

                outputs = self.postprocess_slat_output(
                    outputs,
                    with_mesh_postprocess,
                    with_texture_baking,
                    use_vertex_color,
                )
                glb = outputs.get("glb", None)

                try:
                    if (
                        with_layout_postprocess
                        and self.layout_post_optimization_method is not None
                    ):
                        assert (
                            glb is not None
                        ), "require mesh to run postprocessing"
                        logger.info(
                            "Running layout post optimization method..."
                        )
                        postprocessed_pose = self.run_post_optimization(
                            deepcopy(glb),
                            pointmap_dict["intrinsics"],
                            ss_return_dict,
                            ss_input_dict,
                        )
                        ss_return_dict.update(postprocessed_pose)
                except Exception as e:
                    # Layout post-optimization is best-effort: failures are
                    # logged with traceback but do not abort the pipeline.
                    logger.error(
                        f"Error during layout post optimization: {e}",
                        exc_info=True,
                    )

                return {
                    **ss_return_dict,
                    **outputs,
                    "pointmap": pts.cpu().permute((1, 2, 0)),
                    "pointmap_colors": pts_colors.cpu().permute((1, 2, 0)),
                }

        InferencePipelinePointMap.run = patch_run

    def patch_infer_init():
        """Patches InferencePipeline.__init__ to allow CPU offloading during model initialization."""
        import torch

        try:
            from sam3d_objects.pipeline import preprocess_utils
            from sam3d_objects.pipeline.inference_pipeline_pointmap import (
                InferencePipeline,
            )
            from sam3d_objects.pipeline.inference_utils import (
                SLAT_MEAN,
                SLAT_STD,
            )
        except ImportError:
            print(
                "[MonkeyPatch] Error: Could not import sam3d_objects directly for infer pipeline."
            )
            return

        def patch_init(
            self,
            ss_generator_config_path,
            ss_generator_ckpt_path,
            slat_generator_config_path,
            slat_generator_ckpt_path,
            ss_decoder_config_path,
            ss_decoder_ckpt_path,
            slat_decoder_gs_config_path,
            slat_decoder_gs_ckpt_path,
            slat_decoder_mesh_config_path,
            slat_decoder_mesh_ckpt_path,
            slat_decoder_gs_4_config_path=None,
            slat_decoder_gs_4_ckpt_path=None,
            ss_encoder_config_path=None,
            ss_encoder_ckpt_path=None,
            decode_formats=["gaussian", "mesh"],
            dtype="bfloat16",
            pad_size=1.0,
            version="v0",
            device="cuda",
            ss_preprocessor=preprocess_utils.get_default_preprocessor(),
            slat_preprocessor=preprocess_utils.get_default_preprocessor(),
            ss_condition_input_mapping=["image"],
            slat_condition_input_mapping=["image"],
            pose_decoder_name="default",
            workspace_dir="",
            downsample_ss_dist=0,  # the distance we use to downsample
            ss_inference_steps=25,
            ss_rescale_t=3,
            ss_cfg_strength=7,
            ss_cfg_interval=[0, 500],
            ss_cfg_strength_pm=0.0,
            slat_inference_steps=25,
            slat_rescale_t=3,
            slat_cfg_strength=5,
            slat_cfg_interval=[0, 500],
            rendering_engine: str = "nvdiffrast",  # nvdiffrast OR pytorch3d,
            shape_model_dtype=None,
            compile_model=False,
            slat_mean=SLAT_MEAN,
            slat_std=SLAT_STD,
        ):
            """Initialize pipeline components on CPU first to save GPU memory, then move necessary parts later."""
            self.rendering_engine = rendering_engine
            self.device = torch.device(device)
            self.compile_model = compile_model
            with self.device:
                self.decode_formats = decode_formats
                self.pad_size = pad_size
                self.version = version
                self.ss_condition_input_mapping = ss_condition_input_mapping
                self.slat_condition_input_mapping = (
                    slat_condition_input_mapping
                )
                self.workspace_dir = workspace_dir
                self.downsample_ss_dist = downsample_ss_dist
                self.ss_inference_steps = ss_inference_steps
                self.ss_rescale_t = ss_rescale_t
                self.ss_cfg_strength = ss_cfg_strength
                self.ss_cfg_interval = ss_cfg_interval
                self.ss_cfg_strength_pm = ss_cfg_strength_pm
                self.slat_inference_steps = slat_inference_steps
                self.slat_rescale_t = slat_rescale_t
                self.slat_cfg_strength = slat_cfg_strength
                self.slat_cfg_interval = slat_cfg_interval

                self.dtype = self._get_dtype(dtype)
                if shape_model_dtype is None:
                    self.shape_model_dtype = self.dtype
                else:
                    self.shape_model_dtype = self._get_dtype(
                        shape_model_dtype
                    )

                # Setup preprocessors
                self.pose_decoder = self.init_pose_decoder(
                    ss_generator_config_path, pose_decoder_name
                )
                self.ss_preprocessor = self.init_ss_preprocessor(
                    ss_preprocessor, ss_generator_config_path
                )
                self.slat_preprocessor = slat_preprocessor

                # Temporarily pretend to be on CPU so every sub-model is
                # instantiated in host memory; GPU placement happens later
                # via model_device_ctx.
                raw_device = self.device
                self.device = torch.device("cpu")
                ss_generator = self.init_ss_generator(
                    ss_generator_config_path, ss_generator_ckpt_path
                )
                slat_generator = self.init_slat_generator(
                    slat_generator_config_path, slat_generator_ckpt_path
                )
                ss_decoder = self.init_ss_decoder(
                    ss_decoder_config_path, ss_decoder_ckpt_path
                )
                ss_encoder = self.init_ss_encoder(
                    ss_encoder_config_path, ss_encoder_ckpt_path
                )
                slat_decoder_gs = self.init_slat_decoder_gs(
                    slat_decoder_gs_config_path, slat_decoder_gs_ckpt_path
                )
                slat_decoder_gs_4 = self.init_slat_decoder_gs(
                    slat_decoder_gs_4_config_path,
                    slat_decoder_gs_4_ckpt_path,
                )
                slat_decoder_mesh = self.init_slat_decoder_mesh(
                    slat_decoder_mesh_config_path,
                    slat_decoder_mesh_ckpt_path,
                )

                # Load conditioner embedder so that we only load it once
                ss_condition_embedder = self.init_ss_condition_embedder(
                    ss_generator_config_path, ss_generator_ckpt_path
                )
                slat_condition_embedder = self.init_slat_condition_embedder(
                    slat_generator_config_path, slat_generator_ckpt_path
                )
                self.device = raw_device

                self.condition_embedders = {
                    "ss_condition_embedder": ss_condition_embedder,
                    "slat_condition_embedder": slat_condition_embedder,
                }

                # override generator and condition embedder setting
                self.override_ss_generator_cfg_config(
                    ss_generator,
                    cfg_strength=ss_cfg_strength,
                    inference_steps=ss_inference_steps,
                    rescale_t=ss_rescale_t,
                    cfg_interval=ss_cfg_interval,
                    cfg_strength_pm=ss_cfg_strength_pm,
                )
                self.override_slat_generator_cfg_config(
                    slat_generator,
                    cfg_strength=slat_cfg_strength,
                    inference_steps=slat_inference_steps,
                    rescale_t=slat_rescale_t,
                    cfg_interval=slat_cfg_interval,
                )

                self.models = torch.nn.ModuleDict(
                    {
                        "ss_generator": ss_generator,
                        "slat_generator": slat_generator,
                        "ss_encoder": ss_encoder,
                        "ss_decoder": ss_decoder,
                        "slat_decoder_gs": slat_decoder_gs,
                        "slat_decoder_gs_4": slat_decoder_gs_4,
                        "slat_decoder_mesh": slat_decoder_mesh,
                    }
                )
                logger.info("Loading SAM3D model weights completed.")

                if self.compile_model:
                    logger.info("Compiling model...")
                    self._compile()
                    logger.info("Model compilation completed!")
                self.slat_mean = torch.tensor(slat_mean)
                self.slat_std = torch.tensor(slat_std)

        InferencePipeline.__init__ = patch_init

    patch_pointmap_infer_pipeline()
    patch_infer_init()

    return
|
embodied_gen/utils/monkey_patch/trellis.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project EmbodiedGen
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 14 |
+
# implied. See the License for the specific language governing
|
| 15 |
+
# permissions and limitations under the License.
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn.functional as F
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def monkey_path_trellis():
    """Monkey patches TRELLIS with specific environment settings and Gaussian setup functions."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(this_dir, "../../.."))

    from thirdparty.TRELLIS.trellis.representations import Gaussian
    from thirdparty.TRELLIS.trellis.representations.gaussian.general_utils import (
        build_scaling_rotation,
        inverse_sigmoid,
        strip_symmetric,
    )

    os.environ["TORCH_EXTENSIONS_DIR"] = os.path.expanduser(
        "~/.cache/torch_extensions"
    )
    os.environ["SPCONV_ALGO"] = "auto"  # Can be 'native' or 'auto'
    os.environ['ATTN_BACKEND'] = (
        "xformers"  # Can be 'flash-attn' or 'xformers'
    )
    from thirdparty.TRELLIS.trellis.modules.sparse import set_attn

    set_attn("xformers")

    def patched_setup_functions(self):
        """Configure activation functions and biases for Gaussian representation."""

        def inverse_softplus(x):
            # Analytical inverse of F.softplus, valid for x > 0.
            return x + torch.log(-torch.expm1(-x))

        def covariance_from_scaling_rotation(
            scaling, scaling_modifier, rotation
        ):
            # Covariance = L @ L^T with L built from scale and rotation;
            # only the symmetric upper-triangular part is kept.
            L = build_scaling_rotation(scaling_modifier * scaling, rotation)
            return strip_symmetric(L @ L.transpose(1, 2))

        if self.scaling_activation_type == "exp":
            self.scaling_activation = torch.exp
            self.inverse_scaling_activation = torch.log
        elif self.scaling_activation_type == "softplus":
            self.scaling_activation = F.softplus
            self.inverse_scaling_activation = inverse_softplus

        self.covariance_activation = covariance_from_scaling_rotation
        self.opacity_activation = torch.sigmoid
        self.inverse_opacity_activation = inverse_sigmoid
        self.rotation_activation = F.normalize

        # Biases are expressed in the inverse-activation domain so the
        # forward activations recover the configured values.
        self.scale_bias = self.inverse_scaling_activation(
            torch.tensor(self.scaling_bias)
        ).to(self.device)
        self.rots_bias = torch.zeros((4)).to(self.device)
        self.rots_bias[0] = 1  # identity quaternion (w=1, x=y=z=0)
        self.opacity_bias = self.inverse_opacity_activation(
            torch.tensor(self.opacity_bias)
        ).to(self.device)

    Gaussian.setup_functions = patched_setup_functions
|
embodied_gen/utils/simulation.py
CHANGED
|
@@ -25,6 +25,7 @@ import numpy as np
|
|
| 25 |
import sapien.core as sapien
|
| 26 |
import sapien.physx as physx
|
| 27 |
import torch
|
|
|
|
| 28 |
from mani_skill.agents.base_agent import BaseAgent
|
| 29 |
from mani_skill.envs.scene import ManiSkillScene
|
| 30 |
from mani_skill.examples.motionplanning.panda.utils import (
|
|
@@ -57,9 +58,24 @@ __all__ = [
|
|
| 57 |
"load_assets_from_layout_file",
|
| 58 |
"load_mani_skill_robot",
|
| 59 |
"render_images",
|
|
|
|
| 60 |
]
|
| 61 |
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
def load_actor_from_urdf(
|
| 64 |
scene: sapien.Scene | ManiSkillScene,
|
| 65 |
file_path: str,
|
|
@@ -203,14 +219,21 @@ def load_assets_from_layout_file(
|
|
| 203 |
# Combine initial quaternion with object quaternion
|
| 204 |
x, y, z, qx, qy, qz, qw = position
|
| 205 |
qx, qy, qz, qw = quaternion_multiply([qx, qy, qz, qw], init_quat)
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
actors[node] = actor
|
| 215 |
|
| 216 |
return actors
|
|
@@ -725,8 +748,23 @@ class FrankaPandaGrasper(object):
|
|
| 725 |
Returns:
|
| 726 |
np.ndarray: Array of grasp actions.
|
| 727 |
"""
|
| 728 |
-
|
| 729 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 730 |
obb = mesh.bounding_box_oriented
|
| 731 |
approaching = np.array([0, 0, -1])
|
| 732 |
tcp_pose = self.agent.tcp.pose[env_idx]
|
|
|
|
| 25 |
import sapien.core as sapien
|
| 26 |
import sapien.physx as physx
|
| 27 |
import torch
|
| 28 |
+
import trimesh
|
| 29 |
from mani_skill.agents.base_agent import BaseAgent
|
| 30 |
from mani_skill.envs.scene import ManiSkillScene
|
| 31 |
from mani_skill.examples.motionplanning.panda.utils import (
|
|
|
|
| 58 |
"load_assets_from_layout_file",
|
| 59 |
"load_mani_skill_robot",
|
| 60 |
"render_images",
|
| 61 |
+
"is_urdf_articulated",
|
| 62 |
]
|
| 63 |
|
| 64 |
|
| 65 |
+
def is_urdf_articulated(urdf_path: str) -> bool:
|
| 66 |
+
try:
|
| 67 |
+
tree = ET.parse(urdf_path)
|
| 68 |
+
root = tree.getroot()
|
| 69 |
+
for joint in root.findall(".//joint"):
|
| 70 |
+
j_type = joint.get("type")
|
| 71 |
+
if j_type in ["prismatic", "revolute", "continuous", "planar"]:
|
| 72 |
+
return True
|
| 73 |
+
return False
|
| 74 |
+
except Exception as e:
|
| 75 |
+
print(f"Error parsing URDF {urdf_path}: {e}.")
|
| 76 |
+
return False
|
| 77 |
+
|
| 78 |
+
|
| 79 |
def load_actor_from_urdf(
|
| 80 |
scene: sapien.Scene | ManiSkillScene,
|
| 81 |
file_path: str,
|
|
|
|
| 219 |
# Combine initial quaternion with object quaternion
|
| 220 |
x, y, z, qx, qy, qz, qw = position
|
| 221 |
qx, qy, qz, qw = quaternion_multiply([qx, qy, qz, qw], init_quat)
|
| 222 |
+
target_pose = sapien.Pose(p=[x, y, z], q=[qw, qx, qy, qz])
|
| 223 |
+
if is_urdf_articulated(urdf_file):
|
| 224 |
+
loader = scene.create_urdf_loader()
|
| 225 |
+
loader.fix_root_link = use_static
|
| 226 |
+
actor = loader.load(urdf_file)
|
| 227 |
+
actor.set_root_pose(target_pose)
|
| 228 |
+
else:
|
| 229 |
+
actor = load_actor_from_urdf(
|
| 230 |
+
scene,
|
| 231 |
+
urdf_file,
|
| 232 |
+
target_pose,
|
| 233 |
+
env_idx,
|
| 234 |
+
use_static=use_static,
|
| 235 |
+
update_mass=False,
|
| 236 |
+
)
|
| 237 |
actors[node] = actor
|
| 238 |
|
| 239 |
return actors
|
|
|
|
| 748 |
Returns:
|
| 749 |
np.ndarray: Array of grasp actions.
|
| 750 |
"""
|
| 751 |
+
if isinstance(actor, physx.PhysxArticulation):
|
| 752 |
+
meshes = []
|
| 753 |
+
for link in actor.links:
|
| 754 |
+
link_mesh = get_component_mesh(link, to_world_frame=True)
|
| 755 |
+
if link_mesh is not None and not link_mesh.is_empty:
|
| 756 |
+
meshes.append(link_mesh)
|
| 757 |
+
if meshes:
|
| 758 |
+
mesh = trimesh.util.concatenate(meshes)
|
| 759 |
+
else:
|
| 760 |
+
logger.warning(
|
| 761 |
+
f"Articulation {actor.name} has no valid meshes."
|
| 762 |
+
)
|
| 763 |
+
return None
|
| 764 |
+
else:
|
| 765 |
+
physx_rigid = actor.components[1]
|
| 766 |
+
mesh = get_component_mesh(physx_rigid, to_world_frame=True)
|
| 767 |
+
|
| 768 |
obb = mesh.bounding_box_oriented
|
| 769 |
approaching = np.array([0, 0, -1])
|
| 770 |
tcp_pose = self.agent.tcp.pose[env_idx]
|
embodied_gen/utils/tags.py
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
VERSION = "v0.1.
|
|
|
|
| 1 |
+
VERSION = "v0.1.8"
|
embodied_gen/validators/urdf_convertor.py
CHANGED
|
@@ -127,7 +127,7 @@ class URDFGenerator(object):
|
|
| 127 |
self.gpt_client = gpt_client
|
| 128 |
self.render_view_num = render_view_num
|
| 129 |
if render_view_num == 4:
|
| 130 |
-
view_desc = "This is orthographic projection showing the front
|
| 131 |
else:
|
| 132 |
view_desc = "This is the rendered views "
|
| 133 |
|
|
@@ -139,7 +139,7 @@ class URDFGenerator(object):
|
|
| 139 |
You are an expert in 3D object analysis and physical property estimation.
|
| 140 |
Give the category of this object asset (within 3 words), (if category is
|
| 141 |
already provided, use it directly), accurately describe this 3D object asset (within 15 words),
|
| 142 |
-
Determine the pose of the object in the first image and estimate the true vertical height
|
| 143 |
(vertical projection) range of the object (in meters), i.e., how tall the object appears from top
|
| 144 |
to bottom in the first image. also weight range (unit: kilogram), the average
|
| 145 |
static friction coefficient of the object relative to rubber and the average dynamic friction
|
|
@@ -161,14 +161,16 @@ class URDFGenerator(object):
|
|
| 161 |
use the diameter as the vertical height. If the edge is visible, use the thickness instead.
|
| 162 |
- This is not necessarily the full length of the object, but how tall it appears
|
| 163 |
in the first image vertically, based on its pose and orientation estimation on all views.
|
| 164 |
-
-
|
| 165 |
-
|
|
|
|
| 166 |
Estimate the vertical projection of their real length based on its pose.
|
| 167 |
For example:
|
| 168 |
- A pen standing upright in the first image (aligned with the image's vertical axis)
|
| 169 |
-
full body visible in the first image: β vertical height β 0.14-0.20 m
|
| 170 |
- A pen lying flat in the first image or either the tip or the tail is facing the image
|
| 171 |
-
(showing thickness or as a circle)
|
|
|
|
| 172 |
- Tilted pen in the first image (e.g., ~45Β° angle): vertical height β 0.07-0.12 m
|
| 173 |
- Use the rest views to help determine the object's 3D pose and orientation.
|
| 174 |
Assume the object is in real-world scale and estimate the approximate vertical height
|
|
@@ -204,8 +206,13 @@ class URDFGenerator(object):
|
|
| 204 |
Returns:
|
| 205 |
dict[str, any]: Parsed attributes.
|
| 206 |
"""
|
| 207 |
-
|
| 208 |
-
lines = [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 209 |
category = lines[0].split(": ")[1]
|
| 210 |
description = lines[1].split(": ")[1]
|
| 211 |
min_height, max_height = map(
|
|
|
|
| 127 |
self.gpt_client = gpt_client
|
| 128 |
self.render_view_num = render_view_num
|
| 129 |
if render_view_num == 4:
|
| 130 |
+
view_desc = "This is an orthographic projection showing the front(1st image), right(2nd), back(3rd), and left(4th) views." # noqa
|
| 131 |
else:
|
| 132 |
view_desc = "This is the rendered views "
|
| 133 |
|
|
|
|
| 139 |
You are an expert in 3D object analysis and physical property estimation.
|
| 140 |
Give the category of this object asset (within 3 words), (if category is
|
| 141 |
already provided, use it directly), accurately describe this 3D object asset (within 15 words),
|
| 142 |
+
Determine the pose of the object in the first image based on all views and estimate the true vertical height
|
| 143 |
(vertical projection) range of the object (in meters), i.e., how tall the object appears from top
|
| 144 |
to bottom in the first image. also weight range (unit: kilogram), the average
|
| 145 |
static friction coefficient of the object relative to rubber and the average dynamic friction
|
|
|
|
| 161 |
use the diameter as the vertical height. If the edge is visible, use the thickness instead.
|
| 162 |
- This is not necessarily the full length of the object, but how tall it appears
|
| 163 |
in the first image vertically, based on its pose and orientation estimation on all views.
|
| 164 |
+
- Distinguish whether the entire objects such as plates, books, pens, spoons, fork are placed
|
| 165 |
+
horizontally or vertically based on pictures from left, right views.
|
| 166 |
+
|
| 167 |
Estimate the vertical projection of their real length based on its pose.
|
| 168 |
For example:
|
| 169 |
- A pen standing upright in the first image (aligned with the image's vertical axis)
|
| 170 |
+
full body visible in the first and other image: β vertically β vertical height β 0.14-0.20 m
|
| 171 |
- A pen lying flat in the first image or either the tip or the tail is facing the image
|
| 172 |
+
(showing thickness or as a circle), left/right view can show the full body
|
| 173 |
+
β horizontally β vertical height β 0.018-0.025 m
|
| 174 |
- Tilted pen in the first image (e.g., ~45Β° angle): vertical height β 0.07-0.12 m
|
| 175 |
- Use the rest views to help determine the object's 3D pose and orientation.
|
| 176 |
Assume the object is in real-world scale and estimate the approximate vertical height
|
|
|
|
| 206 |
Returns:
|
| 207 |
dict[str, any]: Parsed attributes.
|
| 208 |
"""
|
| 209 |
+
raw_lines = response.split("\n")
|
| 210 |
+
lines = []
|
| 211 |
+
for line in raw_lines:
|
| 212 |
+
line = line.strip()
|
| 213 |
+
if line and not line.startswith("```") and ":" in line:
|
| 214 |
+
lines.append(line)
|
| 215 |
+
|
| 216 |
category = lines[0].split(": ")[1]
|
| 217 |
description = lines[1].split(": ")[1]
|
| 218 |
min_height, max_height = map(
|