jonas.sugar committed on
Commit
00429c1
Β·
1 Parent(s): 26e4c90

add renderer tools

Browse files
tools/generate_object_snapshots_and_video.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# Copyright (c) 2023-2026, AgiBot Inc. All Rights Reserved.
# Author: Genie Sim Team
# License: Mozilla Public License Version 2.0

import os
import glob
from pathlib import Path
import numpy as np
import cv2

from isaacsim import SimulationApp

# The SimulationApp must be instantiated before importing any carb/omni
# modules; they are only available once the Kit runtime is up.
simulation_app = SimulationApp({"headless": True})

import carb
import omni.usd
import omni.kit.viewport.utility

# Renderer configuration: transparent background and no viewport chrome,
# so snapshots contain only the object.
settings = carb.settings.get_settings()
settings.set_bool("/rtx/post/aa/alpha", True)  # enable alpha
settings.set_bool("/rtx/background/visible", False)  # hide dome/background

settings.set_bool("/app/viewport/grid/enabled", False)
settings.set_bool("/app/viewport/showViewportAxis", False)
settings.set_bool("/app/viewport/showWireframe", False)

# Fully transparent clear color (RGBA all zero).
settings.set_float("/rtx/post/resample/clearColorR", 0.0)
settings.set_float("/rtx/post/resample/clearColorG", 0.0)
settings.set_float("/rtx/post/resample/clearColorB", 0.0)
settings.set_float("/rtx/post/resample/clearColorA", 0.0)

settings.set_bool("/app/viewport/showGizmos", False)
settings.set_bool("/gui/ui/visible", False)

settings.set_bool("/rtx/post/tonemap/enabled", False)  # Tonemapping can bake alpha to 1.0
settings.set_bool("/rtx/post/composite/enabled", True)
# BUGFIX: execMode is an integer mode selector, not a boolean; 0 disables
# DLSS (DLSS can interfere with alpha).
settings.set_int("/rtx/post/dlss/execMode", 0)

settings.set_bool("/rtx/domeLight/cameraVisible", False)

settings.set_int("/rtx/rendermode", 0)

from omni.isaac.core.utils.stage import open_stage
from pxr import UsdLux, UsdGeom, Sdf, Gf
import sys

# Make the project root importable so the local `assets` package resolves.
_SRC_ROOT = Path(__file__).resolve().parent.parent.parent
if str(_SRC_ROOT) not in sys.path:
    sys.path.insert(0, str(_SRC_ROOT))

from assets import ASSETS_INDEX, ASSETS_PATH
55
def render_thumbnails(usd_path: str, visualization_path: Path):
    """Render turntable snapshots and videos for the object stage at `usd_path`.

    Opens the stage, adds studio lighting, rotates `/World/object` around Z in
    30-degree steps, captures every `/World/Camera*` viewpoint to PNG under
    `visualization_path/snapshot`, then assembles the frames into per-camera
    MP4s and a single concatenated `merged.mp4` under
    `visualization_path/video` (individual MP4s are deleted after merging).

    Args:
        usd_path: Path to the recorder USD stage to render.
        visualization_path: Output directory; created if missing.
    """
    usd_path = str(Path(usd_path).resolve())
    print(f"Opening: {usd_path}")
    os.makedirs(visualization_path, exist_ok=True)

    # 1. Open the stage.
    open_stage(usd_path)

    # 2. Add lighting (CRITICAL for non-black images): the capture utility
    # just reads pixels; it does not auto-light the scene.
    stage = omni.usd.get_context().get_stage()

    # Dome light (ambient fill), kept out of the primary rays so it does not
    # appear as a solid background.
    # BUGFIX: prim path typo "DomwLight" -> "DomeLight".
    dome_light = UsdLux.DomeLight.Define(stage, Sdf.Path("/World/DomeLight"))
    dome_light.GetEnableColorTemperatureAttr().Set(True)
    dome_light.GetColorTemperatureAttr().Set(6150.0)
    dome_light.GetIntensityAttr().Set(1)
    dome_light.GetExposureAttr().Set(9.0)
    dome_light.GetPrim().CreateAttribute("cameraVisibility", Sdf.ValueTypeNames.Bool).Set(False)
    dome_light.GetPrim().CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool).Set(False)
    xform_dome = UsdGeom.Xformable(dome_light)
    xform_dome.ClearXformOpOrder()  # ensure a clean slate
    xform_dome.AddTranslateOp().Set(Gf.Vec3d(0, 0, 305))

    # Distant light ("sun").
    distant_light = UsdLux.DistantLight.Define(stage, Sdf.Path("/World/DistantLight"))
    distant_light.GetIntensityAttr().Set(1.0)
    distant_light.GetExposureAttr().Set(10.0)
    distant_light.GetAngleAttr().Set(0.53)

    # Color temperature only takes effect after enableColorTemperature.
    distant_light.GetEnableColorTemperatureAttr().Set(True)
    distant_light.GetColorTemperatureAttr().Set(7250.0)

    # CRITICAL: keep this light out of the primary rays as well.
    distant_light.GetPrim().CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool).Set(False)

    xform_dist = UsdGeom.Xformable(distant_light)
    xform_dist.ClearXformOpOrder()
    xform_dist.AddTranslateOp().Set(Gf.Vec3d(0, 0, 305))
    # AddRotateXYZOp applies (roll, pitch, yaw) in XYZ sequence.
    xform_dist.AddRotateXYZOp().Set(Gf.Vec3f(55, 0, 135))

    # 3. Warmup render loop so textures/materials finish loading.
    print(" -> Warming up engine...")
    for _ in range(20):
        simulation_app.update()

    out_dir = visualization_path / "snapshot"
    out_dir.mkdir(exist_ok=True)

    # 4. Collect the stage's capture cameras (/World/Camera1, /World/Camera2, ...).
    # NOTE: IsA(UsdGeom.Camera) already implies the prim type; the old extra
    # GetTypeName() == "Camera" check was redundant.
    cameras = []
    for prim in stage.Traverse():
        if prim.IsA(UsdGeom.Camera) and "/World/Camera" in prim.GetPath().pathString:
            cameras.append(prim.GetPath().pathString)

    if not cameras:
        print(" -> No cameras found in stage.")
        return

    # The object that gets rotated between captures.
    object_prim_path = "/World/object"
    object_prim = stage.GetPrimAtPath(object_prim_path)
    if not object_prim.IsValid():
        print(f" -> Object prim not found at {object_prim_path}")
        return

    # Drive captures through the active viewport.
    viewport_api = omni.kit.viewport.utility.get_active_viewport()
    viewport_api.resolution = (720, 720)

    # Turntable: 12 rotations around Z in 30-degree steps.
    # (BUGFIX: the old comment claimed 6 angles at 60-degree steps.)
    rotation_angles = [0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330]

    all_images = []  # (camera_idx, rotation_idx, image_path)

    for i, cam_path in enumerate(cameras):
        # Switch the viewport to this camera and let it settle.
        viewport_api.set_active_camera(cam_path)
        for _ in range(5):
            simulation_app.update()

        # Rotate the object and capture at each angle.
        for rot_idx, angle_deg in enumerate(rotation_angles):
            # Quaternion for a rotation of angle_deg around the Z axis.
            xform = UsdGeom.Xformable(object_prim)
            xform.ClearXformOpOrder()
            angle_rad = np.radians(angle_deg)
            rot_quat = Gf.Quatf(np.cos(angle_rad / 2.0), 0.0, 0.0, np.sin(angle_rad / 2.0))
            xform.AddOrientOp().Set(rot_quat)

            for _ in range(5):
                simulation_app.update()

            out_path = out_dir / f"Camera{i+1}_rot{rot_idx:02d}.png"
            if out_path.exists():
                os.remove(str(out_path))

            print(f" -> Capturing {cam_path} at rotation {angle_deg}°")
            omni.kit.viewport.utility.capture_viewport_to_file(
                viewport_api=viewport_api,
                file_path=str(out_path),
                is_hdr=False,
            )

            # Capture is asynchronous: pump frames until the file lands,
            # bounded so a failed capture cannot hang the tool.
            frames_waited = 0
            while not out_path.exists() and frames_waited < 50:
                simulation_app.update()
                frames_waited += 1

            if out_path.exists():
                all_images.append((i, rot_idx, str(out_path)))

    # 5. Per-camera videos.
    video_dir = out_dir.parent / "video"
    video_dir.mkdir(exist_ok=True)

    individual_video_paths = []

    for i, cam_path in enumerate(cameras):
        # Frames for this camera, in rotation order.
        camera_data = [(rot_idx, img_path) for cam_idx, rot_idx, img_path in all_images if cam_idx == i]
        camera_data.sort(key=lambda x: x[0])
        camera_images = [img_path for _, img_path in camera_data]

        if not camera_images:
            continue

        video_path = video_dir / f"Camera{i+1}.mp4"
        if video_path.exists():
            os.remove(str(video_path))

        print(f" -> Creating video for Camera{i+1} from {len(camera_images)} images")

        # First frame determines the video dimensions.
        first_img = cv2.imread(camera_images[0])
        if first_img is None:
            continue

        height, width = first_img.shape[:2]
        fps = 6  # 12 frames -> 2-second turntable

        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        video_writer = cv2.VideoWriter(str(video_path), fourcc, fps, (width, height))

        for img_path in camera_images:
            img = cv2.imread(img_path)
            if img is not None:
                video_writer.write(img)

        video_writer.release()
        print(f" -> Video saved to {video_path}")
        individual_video_paths.append(str(video_path))

    # 6. Merged video: all per-camera videos concatenated.
    if len(individual_video_paths) >= 1:
        print(f" -> Creating merged video from {len(individual_video_paths)} camera(s)")

        # Dimensions and fps come from the first video.
        # BUGFIX: all merge work is nested under isOpened(); previously a
        # failed open would leave width/height/fps undefined downstream.
        first_cap = cv2.VideoCapture(individual_video_paths[0])
        if first_cap.isOpened():
            width = int(first_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(first_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = first_cap.get(cv2.CAP_PROP_FPS)
            first_cap.release()

            merged_video_path = video_dir / "merged.mp4"
            if merged_video_path.exists():
                os.remove(str(merged_video_path))

            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            merged_writer = cv2.VideoWriter(str(merged_video_path), fourcc, fps, (width, height))

            # Copy frames from each per-camera video in order.
            total_frames = 0
            for video_path in individual_video_paths:
                cap = cv2.VideoCapture(video_path)
                if cap.isOpened():
                    while True:
                        ret, frame = cap.read()
                        if not ret:
                            break
                        merged_writer.write(frame)
                        total_frames += 1
                    cap.release()

            merged_writer.release()
            print(f" -> Merged video saved ({total_frames} frames)")

            # Individual camera videos are redundant once merged.
            for video_path in individual_video_paths:
                try:
                    if os.path.exists(video_path):
                        os.remove(video_path)
                        print(f" -> Deleted {Path(video_path).name}")
                except OSError as e:
                    print(f" -> Failed to delete {video_path}: {e}")
267
+
268
+
269
def main():
    """Render snapshots/videos for every recorder.usda missing a merged video."""
    recorder_files = glob.glob(f"{ASSETS_PATH}/**/recorder.usda", recursive=True)
    total = len(recorder_files)
    for idx, usd_file in enumerate(recorder_files, start=1):
        asset_id = Path(usd_file).parent.name
        visualization_path = ASSETS_PATH / "extra" / "visualization" / asset_id
        # Skip assets whose merged video already exists.
        if (visualization_path / "video" / "merged.mp4").exists():
            continue
        print(f"{idx}/{total}")
        render_thumbnails(usd_file, visualization_path)
281
+
282
+
283
if __name__ == "__main__":
    # BUGFIX: guarantee the Kit app is shut down even if rendering raises;
    # previously an exception in main() would skip close() and leave the
    # headless app running.
    try:
        main()
    finally:
        simulation_app.close()
tools/generate_recorder_usda.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# Copyright (c) 2023-2026, AgiBot Inc. All Rights Reserved.
# Author: Genie Sim Team
# License: Mozilla Public License Version 2.0

"""
generate_recorder_usda.py

For each asset in ASSETS_INDEX, build a `recorder.usda` stage alongside the
asset's `Aligned.usda` that payloads the asset under /World/object and places
look-at cameras around its bounding box, ready for snapshot/video rendering.

(BUGFIX: the previous docstring described a different script,
list_usd_prims.py.)
"""

import os
import sys
from pathlib import Path

import numpy as np

try:
    from pxr import Usd, UsdGeom, UsdPhysics, Gf, Sdf, Tf
except ImportError:
    sys.exit("❌ Missing pxr.Usd. Install OpenUSD: pip install usd-core")


# Make the project root importable so the local `assets` package resolves.
_SRC_ROOT = Path(__file__).resolve().parent.parent.parent
if str(_SRC_ROOT) not in sys.path:
    sys.path.insert(0, str(_SRC_ROOT))

from assets import ASSETS_INDEX, ASSETS_PATH
29
+
30
+
31
def compute_bbox_info(prim):
    """Compute the merged world-space AABB of all Mesh prims under `prim`.

    Returns:
        (center, size, orientation): Gf.Vec3d midpoint, Gf.Vec3d extents and
        an identity Gf.Quatd (the box is axis-aligned by construction).
        (BUGFIX: the old annotation claimed a Gf.Range3d return.)

    Raises:
        ValueError: if no UsdGeom.Mesh prims exist under `prim` (the old code
        crashed with AttributeError on None in this case).

    Note: the previous implementation also computed world/local bounds,
    rotations and midpoints that were never used; that dead code is removed.
    """
    bbox_cache = UsdGeom.BBoxCache(
        Usd.TimeCode.Default(),
        [UsdGeom.Tokens.default_],
        useExtentsHint=False,
    )

    # Union the world-space aligned range of every Mesh prim in the subtree.
    merged = None
    for p in Usd.PrimRange(prim):
        if p.IsA(UsdGeom.Mesh):
            b = bbox_cache.ComputeWorldBound(p).ComputeAlignedRange()
            if merged is None:
                merged = Gf.Range3d(b.GetMin(), b.GetMax())
            else:
                merged = merged.UnionWith(b)

    if merged is None:
        raise ValueError(f"No UsdGeom.Mesh prims found under {prim.GetPath()}")

    return merged.GetMidpoint(), merged.GetSize(), Gf.Quatd(1, 0, 0, 0)
80
+
81
+
82
# Root directory of per-object asset folders.
# NOTE(review): appears unused within this file — confirm before removing.
ASSETS_PATH_OBJ = ASSETS_PATH / "objects"
83
+
84
+
85
def load_scene(scene_path: str) -> Usd.Stage:
    """Open the USD layer at `scene_path`, or create a new empty one.

    `scene_path` is resolved against ASSETS_PATH (an already-absolute path is
    returned unchanged by os.path.join).

    Args:
        scene_path: Layer path, absolute or relative to ASSETS_PATH.

    Returns:
        The opened or newly created Usd.Stage.
    """
    abs_scene_path = os.path.join(ASSETS_PATH, scene_path)
    if scene_path and os.path.exists(abs_scene_path):
        print(f"Loading scene file: {abs_scene_path}")
        stage = Usd.Stage.Open(abs_scene_path)
        print("Scene load complete")
    else:
        print(f"Scene file not found: {abs_scene_path}; creating new empty stage")
        # BUGFIX: create at the resolved absolute path; the old code passed the
        # raw scene_path, so a relative input was created under the CWD while
        # the existence check above looked under ASSETS_PATH.
        stage = Usd.Stage.CreateNew(abs_scene_path)

    return stage
98
+
99
+
100
def check_item(object_id, info):
    """Build a fresh recorder.usda for one asset.

    Creates a stage next to the asset's Aligned.usda that payloads the asset
    under /World/object and defines two look-at cameras (+45 / -45 degree
    pitch) framing the object's bounding box, then saves the layer.

    Args:
        object_id: Asset identifier (unused here; kept for caller symmetry).
        info: Asset index entry; `info["url"]` is the Aligned.usda path
              relative to ASSETS_PATH.
    """
    usda_url = str(ASSETS_PATH / info["url"])
    # BUGFIX: variable typo "thumnail_url" corrected.
    recorder_url = usda_url.replace("Aligned.usda", "recorder.usda")
    # Always rebuild from scratch.
    if os.path.exists(recorder_url):
        os.remove(recorder_url)

    stage = load_scene(recorder_url)

    # Stage conventions: Z-up, meters.
    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
    stage.SetMetadata(UsdGeom.Tokens.metersPerUnit, 1.0)

    # Root /World with an identity orientation, set as default prim.
    world = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
    UsdGeom.Xformable(world.GetPrim()).AddOrientOp().Set(Gf.Quatf(1, 0, 0, 0))
    stage.SetDefaultPrim(stage.GetPrimAtPath("/World"))

    # Object prim: payload the asset USD under /World/object.
    obj_xform = UsdGeom.Xform.Define(stage, Sdf.Path("/World/object"))
    obj_xform.ClearXformOpOrder()
    obj_prim = obj_xform.GetPrim()
    obj_prim.GetPayloads().AddPayload(usda_url)

    # Bounding box of the payloaded meshes.
    bbox_center, bbox_size, bbox_rotq = compute_bbox_info(obj_prim)

    # Camera distance scales with the largest dimension; small objects get a
    # little extra offset so the near clip plane does not cut into them.
    max_dimension = max(bbox_size[0], bbox_size[1], bbox_size[2])
    if max_dimension < 0.5:
        camera_distance = max_dimension * 2.0 + 0.1
    else:
        camera_distance = max_dimension * 2.0
    camera_distance = float(camera_distance)  # ensure scalar

    # Two cameras at +45 and -45 degree pitch, both aimed at the bbox center.
    # (BUGFIX: old comments claimed "3 cameras" and "top, horizontal, bottom";
    # the unused camera_names/camera_paths/cam_idx bookkeeping is removed.)
    pitch_angles = [45.0, -45.0]
    rot = Gf.Matrix3d(bbox_rotq)

    for i, pitch_deg in enumerate(pitch_angles):
        # Yaw fixed at 0: both cameras lie in the same vertical plane.
        yaw_deg = 0.0

        yaw = np.radians(yaw_deg)
        pitch = np.radians(pitch_deg)

        # Unit direction on the sphere (Z-up spherical coordinates).
        d = Gf.Vec3d(
            np.cos(pitch) * np.cos(yaw),
            np.cos(pitch) * np.sin(yaw),
            np.sin(pitch),
        )

        world_dir = rot * d
        world_dir.Normalize()
        pos = bbox_center + world_dir * camera_distance

        # Define the camera prim.
        camera = UsdGeom.Camera.Define(stage, Sdf.Path(f"/World/Camera{i + 1}"))

        # Intrinsics: square aperture, fixed focal length; the camera itself
        # is marked invisible so it never shows in renders.
        camera.CreateFocalLengthAttr(18.14756)  # in mm
        camera.CreateHorizontalApertureAttr(15.290)  # in mm
        camera.CreateVerticalApertureAttr(15.290)  # in mm
        camera.CreateFStopAttr(0.0)  # no depth of field
        camera.CreateFocusDistanceAttr(camera_distance)
        camera.CreateClippingRangeAttr().Set(Gf.Vec2f(0.05, 100.0))
        camera.GetVisibilityAttr().Set(UsdGeom.Tokens.invisible)

        # Build a look-at matrix: -Z points at bbox_center, world Z preferred
        # as "up" (falls back to Y when the view is vertical).
        forward = (bbox_center - pos).GetNormalized()
        z_axis = -forward
        up = Gf.Vec3d(0, 0, 1)
        x_axis = Gf.Cross(up, z_axis)
        if x_axis.GetLength() < 1e-4:
            up = Gf.Vec3d(0, 1, 0)
            x_axis = Gf.Cross(up, z_axis)
        x_axis = x_axis.GetNormalized()
        y_axis = Gf.Cross(z_axis, x_axis).GetNormalized()

        M = Gf.Matrix4d(
            x_axis[0], x_axis[1], x_axis[2], 0.0,
            y_axis[0], y_axis[1], y_axis[2], 0.0,
            z_axis[0], z_axis[1], z_axis[2], 0.0,
            pos[0], pos[1], pos[2], 1.0,
        )
        xf = UsdGeom.Xformable(camera.GetPrim())
        xf.ClearXformOpOrder()
        xf.AddTransformOp().Set(M)

    stage.GetRootLayer().Save()
217
+
218
+
219
def main():
    """Generate a recorder stage for every indexed asset lacking a merged video."""
    for object_id in ASSETS_INDEX:
        info = ASSETS_INDEX[object_id]
        visualization_path = ASSETS_PATH / "extra" / "visualization" / object_id
        merged_video = visualization_path / "video" / "merged.mp4"
        # Already rendered end-to-end: nothing to regenerate.
        if merged_video.exists():
            continue
        print(info["url"])
        check_item(object_id, info)
229
+
230
+
231
# Script entry point.
if __name__ == "__main__":
    main()