# benchmarks-viz-tiles / scripts/render_standardized.py
# Uploaded with huggingface_hub (rev 4aff85d, verified) by ckwolfe.
"""Standardized-framing video renderer — single source of truth.
Produces one mp4 per cell, all identical camera + resolution + codec so
the paper's tile grid lines up frame-for-frame. Bypasses each method's
native renderer (which differ in FOV, resolution, and what's onscreen)
by re-rendering a Spider-preprocessed MuJoCo scene with BENCH_CAMERA.
Inputs (first available wins — see resolve_cell for why kinematic leads):
1. trajectory_kinematic.npz — pure MANO→hand IK (open-loop reference)
2. trajectory_ikrollout.npz — IK rollout of the kinematic trajectory
3. trajectory_mjwp.npz — Spider's MJWP sampler output (closed-ish loop)
Output: outputs/videos_std/<method>_<hand>_<dataset>_<traj>_seed<N>_<ws>.mp4
All mp4s: 720×480, 30 fps, h264 crf 20, yuv420p.
Camera: BENCH_CAMERA["front"] pos=[0,-1.6,2.2] lookat=[0,-0.1,1.2] fov=30.
Run outside any container — needs only mujoco + numpy + ffmpeg.
"""
from __future__ import annotations
import argparse
import os
import sys
import subprocess
from pathlib import Path
from typing import Optional
import numpy as np
# Repo checkout whose `shared/` dir provides bench.video_recorder.
REPO = Path("/home/azureuser/ckwolfe/bench-dm-fix")
sys.path.insert(0, str(REPO))
sys.path.insert(0, str(REPO / "shared"))

# The seven OakInk-v2 bimanual trajectories rendered into the tile grid.
OAKINK_TRAJS = [
    "lift_board", "pick_spoon_bowl", "pour_tube", "stir_beaker",
    "uncap_alcohol_burner", "unplug", "wipe_board",
]
# Robot hand embodiments to render.
HANDS = ["allegro", "inspire", "schunk", "xhand"]
# Spider-preprocessed scenes + trajectories, one cell per (hand, traj).
PROCESSED = Path("/mnt/external/data/oakink_v2/processed/oakink")
# Destination for the standardized mp4s.
OUT_DIR = Path("/mnt/external/outputs/videos_std")
# ManipTrans captured-qpos dumps (consumed by _resolve_mt_cell).
MT_QPOS_DIR = Path("/mnt/external/outputs/mt_qpos")
OUT_DIR.mkdir(parents=True, exist_ok=True)
def resolve_cell(hand: str, traj: str):
    """Locate the scene XML and best rollout npz for one (hand, traj) cell.

    Returns (scene_xml, rollout_npz, source_tag) on success, otherwise
    (None, None, reason).

    `trajectory_kinematic.npz` is preferred over mjwp because kinematic is a
    clean 2-D (T, N) qpos array across all cells (~259 frames each), giving
    uniform rollout length in the tile grid. mjwp is a (T_plan, K_knots,
    N_dof) receding-horizon tensor and would need per-method unrolling
    logic to visualize.
    """
    cell_dir = PROCESSED / hand / "bimanual" / traj
    scene_xml = cell_dir / "scene.xml"
    if not scene_xml.is_file():
        return None, None, "no scene.xml"
    candidates = (
        ("trajectory_kinematic.npz", "kinematic"),
        ("trajectory_ikrollout.npz", "ikrollout"),
        ("trajectory_mjwp.npz", "mjwp"),
    )
    for fname, source_tag in candidates:
        rollout = cell_dir / "0" / fname
        if rollout.is_file():
            return scene_xml, rollout, source_tag
    return None, None, "no trajectory npz"
def load_qpos(npz_path: Path) -> Optional[np.ndarray]:
    """Load a qpos trajectory from *npz_path* as a (T, N) float32 array.

    Tries common key aliases first ("qpos", "q", "data", "trajectory",
    "states"); a single-element object array under one of those keys is
    unwrapped, and if it holds a packed dict the first of its
    "qpos"/"q"/"data" entries is used. A 3-D (T, B, N) array is reduced to
    its first batch. Falls back to the first 2-D array under any key.

    Returns None when nothing usable is found.
    """
    with np.load(npz_path, allow_pickle=True) as z:
        for k in ("qpos", "q", "data", "trajectory", "states"):
            if k not in z.files:
                continue
            arr = z[k]
            # A dict saved through np.savez round-trips as a 0-d object
            # array; the old `ndim == 1` check missed that case, and
            # .item() on a size>1 object array would raise — guard on size.
            if arr.dtype == object and arr.size == 1:
                obj = arr.item()
                if isinstance(obj, dict):
                    for sub in ("qpos", "q", "data"):
                        if sub in obj:
                            obj = obj[sub]
                            break
                arr = np.asarray(obj)
            if arr.ndim == 2:
                return arr.astype(np.float32)
            if arr.ndim == 3:
                # (T, B, N) — take first batch
                return arr[:, 0, :].astype(np.float32)
        # Fall back to the first 2-D numeric array under any key.
        for k in z.files:
            arr = z[k]
            if getattr(arr, "ndim", 0) == 2:
                return arr.astype(np.float32)
    return None
def render(scene_xml: Path, qpos: np.ndarray, out_mp4: Path,
           camera_name: str = "front") -> tuple[bool, str]:
    """Render a (T, N) qpos trajectory to mp4 with the standardized camera.

    Compiles the (patched) MuJoCo scene, replays `qpos` frame by frame via
    mj_forward, and pipes frames through VideoRecorder/ffmpeg.

    Returns (ok, note): ok is True iff at least one frame was written;
    note carries either the frame count or a failure reason.
    """
    try:
        import mujoco
    except ImportError:
        return False, "mujoco import failed"
    from bench.video_recorder import BENCH_CAMERA, BENCH_VIDEO, VideoRecorder
    cam_spec = BENCH_CAMERA[camera_name]
    w, h = BENCH_VIDEO["resolution"]
    fps = BENCH_VIDEO["fps"]

    def _prep_xml(src: Path) -> Path:
        """Defensive XML preprocessing before model compile.

        (1) Strip per-texture `colorspace` attrs that some mujoco builds
        reject; (2) ensure exactly one <visual><global offwidth/offheight>
        so the offscreen framebuffer fits the render target (mujoco rejects
        duplicate <global> siblings); (3) stage the patched XML under /tmp
        with a symlinked assets tree so the scene's `../../../assets/...`
        relative refs still resolve (/mnt/external is root-owned, so we
        cannot write next to src).
        """
        import re
        txt = src.read_text()
        txt = txt.replace(' colorspace="linear"', '').replace(' colorspace="srgb"', '')
        new_global = f'<global offwidth="{w}" offheight="{h}"/>'
        if "<global" in txt:
            # Rewrite the existing <global>, adding the offscreen
            # framebuffer attributes only when missing.
            def sub(m):
                tag = m.group(0)
                if "offwidth" in tag and "offheight" in tag:
                    return tag
                # Insert attributes before the trailing "/>" or ">".
                closing = "/>" if tag.rstrip().endswith("/>") else ">"
                body = tag[:len(tag) - len(closing)].rstrip()
                return f'{body} offwidth="{w}" offheight="{h}"{closing}'
            txt = re.sub(r"<global[^>]*/?>", sub, txt, count=1)
        elif "<visual>" in txt:
            txt = txt.replace("<visual>", f"<visual>\n  {new_global}", 1)
        else:
            txt = re.sub(r"(<mujoco[^>]*>)", rf"\1\n  <visual>\n  {new_global}\n  </visual>", txt, count=1)
        patch_root = Path("/tmp/scene_patches")
        # scene.xml lives at .../oakink/<hand>/bimanual/<task>/scene.xml;
        # its relative refs go up 3 levels to reach .../oakink/assets/.
        hand = src.parents[2].name
        task = src.parent.name
        staging = patch_root / hand / "bimanual" / task
        staging.mkdir(parents=True, exist_ok=True)
        # Mirror the assets dir via symlink so the relative paths resolve.
        assets_src = src.parents[3] / "assets"
        assets_link = patch_root / "assets"
        # is_symlink() also covers a stale/broken link, for which exists()
        # is False yet symlink_to() would still raise FileExistsError.
        if not (assets_link.is_symlink() or assets_link.exists()):
            assets_link.symlink_to(assets_src)
        patched = staging / "scene.xml"
        patched.write_text(txt)
        return patched

    try:
        model = mujoco.MjModel.from_xml_path(str(_prep_xml(scene_xml)))
    except ValueError as exc:
        return False, f"mjcf load failed: {exc}"
    data = mujoco.MjData(model)
    # Trajectory dof count may differ from model.nq; write the overlap only.
    k = min(qpos.shape[1], model.nq)
    T = int(qpos.shape[0])
    try:
        renderer = mujoco.Renderer(model, height=h, width=w)
    except Exception as exc:
        return False, f"renderer init failed: {exc}"
    cam = mujoco.MjvCamera()
    pos = np.array(cam_spec["pos"], dtype=np.float64)
    look = np.array(cam_spec["lookat"], dtype=np.float64)
    cam.lookat[:] = look
    # MuJoCo free-camera convention: camera position = lookat - distance *
    # fwd, with fwd = [cos(el)cos(az), cos(el)sin(az), sin(el)]. Recover
    # azimuth/elevation from the camera->lookat direction; a camera above
    # the lookat therefore gets a *negative* elevation (looking down).
    d = look - pos
    cam.distance = float(np.linalg.norm(d))
    fwd = d / (cam.distance + 1e-9)
    cam.azimuth = float(np.degrees(np.arctan2(fwd[1], fwd[0])))
    cam.elevation = float(np.degrees(np.arcsin(np.clip(fwd[2], -1.0, 1.0))))
    out_mp4.parent.mkdir(parents=True, exist_ok=True)
    rec = VideoRecorder(out_mp4, fps=fps, size=(w, h))
    frames_written = 0
    try:
        for t in range(T):
            data.qpos[:k] = qpos[t, :k]
            mujoco.mj_forward(model, data)
            renderer.update_scene(data, camera=cam)
            rec.log_frame(renderer.render())
            frames_written += 1
        rec.close()
    except Exception as exc:
        try:
            rec.close()
        except Exception:
            pass
        return False, f"render failed at frame {frames_written}: {exc}"
    finally:
        try:
            renderer.close()
        except Exception:
            pass
    return frames_written > 0, f"{frames_written} frames"
def _resolve_mt_cell(hand: str, traj: str, seed: int, warmstart: str):
    """Find the MT captured-qpos npz plus the matching scene.xml.

    The run_id convention mirrors methods/maniptrans/scripts/run_eval.py.
    Returns (scene_xml, mt_npz, run_id) on success, or (None, None, reason).
    """
    run_id = f"maniptrans_{hand}_oakink_v2_{traj}_bimanual_seed{seed}_{warmstart}"
    npz_path = MT_QPOS_DIR / f"{run_id}.npz"
    if not npz_path.is_file():
        return None, None, f"no MT qpos at {npz_path.name}"
    scene_xml = PROCESSED / hand / "bimanual" / traj / "scene.xml"
    if scene_xml.is_file():
        return scene_xml, npz_path, run_id
    return None, None, "no scene.xml"
def render_mt(scene_xml: Path, mt_npz: Path, out_mp4: Path,
              camera_name: str = "front") -> tuple[bool, str]:
    """Render MT's captured DOF trajectory by name-permutation into scene.xml.

    MT dumps IG-ordered `q` plus `dof_names`; the MJ scene.xml has its own
    per-joint ordering. We map each MJ hinge/slide joint's qpos address to
    the IG DOF whose name matches, then replay the trajectory through
    mj_forward.

    Returns (ok, note) with the same contract as render().
    """
    try:
        import mujoco
    except ImportError:
        return False, "mujoco import failed"
    from bench.video_recorder import BENCH_CAMERA, BENCH_VIDEO, VideoRecorder
    with np.load(mt_npz, allow_pickle=True) as z:
        if "q" not in z.files:
            return False, f"npz missing 'q' key (has {z.files})"
        mt_q = z["q"].astype(np.float32)  # (T, n_ig_dof)
        ig_names = list(z["dof_names"]) if "dof_names" in z.files else None
    T = int(mt_q.shape[0])
    if T == 0:
        return False, "empty mt_q"
    cam_spec = BENCH_CAMERA[camera_name]
    w, h = BENCH_VIDEO["resolution"]
    fps = BENCH_VIDEO["fps"]
    # Same defensive XML prep as render(): strip colorspace attrs, ensure a
    # single <global offwidth/offheight>, and stage the patched XML under
    # /tmp with a symlinked assets tree so relative asset refs resolve.
    import re as _re
    try:
        txt = scene_xml.read_text()
        txt = txt.replace(' colorspace="linear"', '').replace(' colorspace="srgb"', '')
        new_global = f'<global offwidth="{w}" offheight="{h}"/>'
        if "<global" in txt:
            def _sub(m):
                tag = m.group(0)
                if "offwidth" in tag and "offheight" in tag:
                    return tag
                closing = "/>" if tag.rstrip().endswith("/>") else ">"
                body = tag[:len(tag) - len(closing)].rstrip()
                return f'{body} offwidth="{w}" offheight="{h}"{closing}'
            txt = _re.sub(r"<global[^>]*/?>", _sub, txt, count=1)
        elif "<visual>" in txt:
            txt = txt.replace("<visual>", f"<visual>\n  {new_global}", 1)
        else:
            txt = _re.sub(r"(<mujoco[^>]*>)", rf"\1\n  <visual>\n  {new_global}\n  </visual>",
                          txt, count=1)
        patch_root = Path("/tmp/scene_patches")
        hand = scene_xml.parents[2].name
        task = scene_xml.parent.name
        staging = patch_root / hand / "bimanual" / task
        staging.mkdir(parents=True, exist_ok=True)
        assets_src = scene_xml.parents[3] / "assets"
        assets_link = patch_root / "assets"
        # is_symlink() also covers a stale/broken link, for which exists()
        # is False yet symlink_to() would still raise FileExistsError.
        if not (assets_link.is_symlink() or assets_link.exists()):
            assets_link.symlink_to(assets_src)
        patched = staging / "scene.xml"
        patched.write_text(txt)
    except Exception as exc:
        return False, f"xml prep failed: {exc}"
    try:
        model = mujoco.MjModel.from_xml_path(str(patched))
    except ValueError as exc:
        return False, f"mjcf load failed: {exc}"
    # Build the MJ-joint -> IG-DOF permutation by joint name.
    if ig_names is not None:
        ig_idx = {str(n): i for i, n in enumerate(ig_names)}
        perm = []
        for j in range(model.njnt):
            jn = mujoco.mj_id2name(model, mujoco.mjtObj.mjOBJ_JOINT, j)
            jtype = int(model.jnt_type[j])
            # Skip free + ball joints (nq > 1 per joint) — we don't map the
            # object free joint.
            if jtype != mujoco.mjtJoint.mjJNT_HINGE and jtype != mujoco.mjtJoint.mjJNT_SLIDE:
                continue
            if jn in ig_idx:
                perm.append((int(model.jnt_qposadr[j]), ig_idx[jn]))
        note_perm = f"perm size={len(perm)}/{model.njnt}"
    else:
        # Fallback: write the first min(nq, n_ig_dof) values directly.
        k = min(model.nq, mt_q.shape[1])
        perm = [(i, i) for i in range(k)]
        note_perm = f"no dof_names; fallback linear k={k}"
    data = mujoco.MjData(model)
    try:
        renderer = mujoco.Renderer(model, height=h, width=w)
    except Exception as exc:
        return False, f"renderer init failed: {exc}"
    cam = mujoco.MjvCamera()
    pos = np.array(cam_spec["pos"], dtype=np.float64)
    look = np.array(cam_spec["lookat"], dtype=np.float64)
    cam.lookat[:] = look
    # MuJoCo free-camera convention: camera position = lookat - distance *
    # fwd, fwd = [cos(el)cos(az), cos(el)sin(az), sin(el)] — so a camera
    # above the lookat gets a negative elevation.
    d = look - pos
    cam.distance = float(np.linalg.norm(d))
    fwd = d / (cam.distance + 1e-9)
    cam.azimuth = float(np.degrees(np.arctan2(fwd[1], fwd[0])))
    cam.elevation = float(np.degrees(np.arcsin(np.clip(fwd[2], -1.0, 1.0))))
    out_mp4.parent.mkdir(parents=True, exist_ok=True)
    rec = VideoRecorder(out_mp4, fps=fps, size=(w, h))
    frames = 0
    try:
        for t in range(T):
            q_t = mt_q[t]
            for qadr, idx in perm:
                if idx < q_t.shape[0]:
                    data.qpos[qadr] = q_t[idx]
            mujoco.mj_forward(model, data)
            renderer.update_scene(data, camera=cam)
            rec.log_frame(renderer.render())
            frames += 1
        rec.close()
    except Exception as exc:
        try:
            rec.close()
        except Exception:
            pass
        return False, f"render failed at frame {frames}: {exc}"
    finally:
        try:
            renderer.close()
        except Exception:
            pass
    return frames > 0, f"{frames} frames, {note_perm}"
def main() -> int:
    """CLI entry point: render every requested (hand, traj) cell.

    Returns 0 when no cell failed, 1 otherwise (skipped cells with an
    existing mp4 don't count as failures).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--hand", choices=HANDS, default=None)
    parser.add_argument("--traj", choices=OAKINK_TRAJS, default=None)
    parser.add_argument("--source", choices=["kinematic", "mt"], default="kinematic",
                        help="'kinematic' = Oracle/reference via Spider preprocess; "
                             "'mt' = ManipTrans captured qpos via bench_hooks.patch")
    parser.add_argument("--mt-seed", type=int, default=42,
                        help="Seed tag on MT run_id (default 42)")
    parser.add_argument("--mt-warmstart", default="default",
                        help="Warmstart tag on MT run_id (default 'default')")
    parser.add_argument("--method-label", default=None,
                        help="Override filename prefix (default: 'std' for kinematic, 'mt' for mt)")
    parser.add_argument("--force", action="store_true")
    args = parser.parse_args()

    hands = [args.hand] if args.hand is not None else HANDS
    trajs = [args.traj] if args.traj is not None else OAKINK_TRAJS
    cells = [(h, t) for h in hands for t in trajs]
    label = args.method_label or ("mt" if args.source == "mt" else "std")
    bar = "=" * 72
    n_ok = n_fail = n_skip = 0
    print(f"{bar}\nstandardized render ({args.source}) — {len(cells)} cells\n{bar}")
    print("camera: front pos=[0,-1.6,2.2] lookat=[0,-0.1,1.2] fov=30")
    print("video: 720×480 @ 30fps, h264 crf=20")
    print(f"output: {OUT_DIR}\n")
    for hand, traj in cells:
        if args.source == "mt":
            scene, npz, tag_or_rid = _resolve_mt_cell(hand, traj, args.mt_seed, args.mt_warmstart)
            if scene is None:
                print(f"  [fail] {hand}/{traj}: {tag_or_rid}")
                n_fail += 1
                continue
            # Preserve seed + warmstart in the output name so each MT run
            # renders to a distinct file.
            out = OUT_DIR / f"{label}_{hand}_oakink_v2_{traj}_bimanual_seed{args.mt_seed}_{args.mt_warmstart}.mp4"
            if out.is_file() and not args.force:
                print(f"  [skip] {out.name} (exists)")
                n_skip += 1
                continue
            ok, note = render_mt(scene, npz, out)
            status = "✅pass" if ok else "❌fail"
            print(f"  [{status}] {hand}/{traj} (src=mt_qpos): {note}")
            if ok:
                n_ok += 1
            else:
                n_fail += 1
            continue
        # Kinematic/oracle path: fixed seed0_default naming.
        run_id = f"{label}_{hand}_oakink_v2_{traj}_bimanual_seed0_default"
        out = OUT_DIR / f"{run_id}.mp4"
        if out.is_file() and not args.force:
            print(f"  [skip] {run_id} (exists)")
            n_skip += 1
            continue
        scene, npz, tag = resolve_cell(hand, traj)
        if scene is None:
            print(f"  [fail] {hand}/{traj}: {tag}")
            n_fail += 1
            continue
        qpos = load_qpos(npz)
        if qpos is None or qpos.shape[0] == 0:
            print(f"  [fail] {hand}/{traj}: no qpos in {npz.name}")
            n_fail += 1
            continue
        ok, note = render(scene, qpos, out)
        status = "✅pass" if ok else "❌fail"
        print(f"  [{status}] {hand}/{traj} (src={tag}): {note}")
        if ok:
            n_ok += 1
        else:
            n_fail += 1
    print(f"\n{bar}\nsummary: {n_ok} rendered, {n_fail} failed, "
          f"{n_skip} skipped (already present)\n{bar}")
    return 0 if n_fail == 0 else 1
if __name__ == "__main__":
    # Propagate main()'s status code (0 = all rendered, 1 = any failure).
    sys.exit(main())