# mutil_mode_data / visualize_synced_data.py
# Hugging Face Hub page residue (uploader: rollingoat; commit message:
# "Upload visualize_synced_data.py with huggingface_hub"; revision d18f707,
# verified) — kept as comments so the file remains valid Python.
"""Visualize a synced HDF5 dataset (output of sync_image_low_dim.py) as per-demo MP4s.
For each demo, renders selected image observations side-by-side and overlays
selected low-dim observations as text on every frame.
Example:
python visualize_synced_data.py synced.h5 --out-dir ./vis --fps 10 \
--image-keys agentview_image oak_image \
--overlay-keys robot0_eef_pos robot0_gripper_qpos
"""
import argparse
import os
import cv2
import h5py
import numpy as np
def format_value(v) -> str:
    """Render a scalar or array-like as a short fixed-precision string.

    A single value becomes e.g. "1.234"; anything else is flattened and
    rendered as "[a, b, ...]" (an empty input yields "[]").
    """
    flat = np.ravel(np.asarray(v))
    rendered = [f"{float(item):.3f}" for item in flat]
    if len(rendered) == 1:
        return rendered[0]
    return "[" + ", ".join(rendered) + "]"
def visualize_demo(
    demo_grp: h5py.Group,
    demo_name: str,
    out_dir: str,
    image_keys: list,
    fps: int,
    overlay_keys: list,
    target_size=(480, 680),
) -> None:
    """Render one demo to ``<out_dir>/<demo_name>.mp4``.

    Lays the requested image observations side-by-side (each panel labeled
    with its key), stamps a frame counter bottom-left, and overlays the
    selected low-dim observation values as text top-right on every frame.

    Args:
        demo_grp: h5py group for one demo; must contain an "obs" subgroup.
        demo_name: Used for log messages and as the output file stem.
        out_dir: Existing directory the MP4 is written into.
        image_keys: Candidate image obs keys; keys missing from obs are skipped.
        fps: Frame rate of the output video.
        overlay_keys: Low-dim obs keys to overlay; missing keys are skipped
            with a note.
        target_size: (height, per-panel width) each panel is resized to.
    """
    print(f"Visualizing {demo_name} ({image_keys})...")
    obs = demo_grp["obs"]
    present_keys = [k for k in image_keys if k in obs]
    if not present_keys:
        print(f" Skipping {demo_name}: none of {image_keys} in obs.")
        return
    # Load everything up front; truncate to the shortest stream so panels
    # stay in sync even if streams have slightly different lengths.
    image_stacks = [obs[k][:] for k in present_keys]
    n_frames = min(len(s) for s in image_stacks)
    if n_frames == 0:
        print(f" Skipping {demo_name}: no frames.")
        return
    overlay_data = []
    for key in overlay_keys:
        if key not in obs:
            print(f" Note: overlay key '{key}' not in obs, skipping.")
            continue
        overlay_data.append((key, obs[key][:]))
    target_h, per_w = target_size
    total_w = per_w * len(present_keys)
    total_h = target_h
    video_path = os.path.join(out_dir, f"{demo_name}.mp4")
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(video_path, fourcc, fps, (total_w, total_h))
    # Fix: a failed codec/container open used to silently write nothing.
    if not writer.isOpened():
        print(f" Skipping {demo_name}: could not open video writer for {video_path}.")
        return
    # Fix: release the writer even if rendering raises, so the MP4 container
    # is finalized instead of leaked/corrupted.
    try:
        for i in range(n_frames):
            panels = []
            for key, stack in zip(present_keys, image_stacks):
                frame = stack[i]
                # VideoWriter requires 8-bit BGR; HDF5 image obs are not
                # guaranteed uint8, so convert defensively.
                if frame.dtype != np.uint8:
                    frame = np.clip(frame, 0, 255).astype(np.uint8)
                if frame.ndim == 2:
                    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
                elif frame.shape[-1] == 4:
                    # RGBA frames used to crash COLOR_RGB2BGR.
                    frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2BGR)
                else:
                    # Assumes stored images are RGB-ordered — matches the
                    # robomimic-style convention; confirm against producer.
                    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                # cv2.resize takes (width, height).
                frame = cv2.resize(frame, (per_w, target_h))
                cv2.putText(frame, key, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                panels.append(frame)
            row = np.hstack(panels)
            cv2.putText(row, f"Frame: {i}/{n_frames}", (10, target_h - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
            # Overlay low-dim values as text (top-right, stacked)
            y = 30
            for key, data in overlay_data:
                # Clamp to the last sample if a low-dim stream is shorter.
                val = data[i] if i < len(data) else data[-1]
                text = f"{key}: {format_value(val)}"
                (tw, th), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
                x = total_w - tw - 10  # right-align against the frame edge
                cv2.putText(row, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                            (0, 200, 255), 2)
                y += th + 10
            writer.write(row)
    finally:
        writer.release()
    print(f" Saved {video_path}")
def _sort_key(name: str):
parts = name.split("_")
for p in reversed(parts):
if p.isdigit():
return (0, int(p))
return (1, name)
def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the command-line parser for this script."""
    parser = argparse.ArgumentParser(description="Visualize synced HDF5 dataset as videos.")
    parser.add_argument("file", help="Path to synced HDF5 file")
    parser.add_argument("--out-dir", default="./vis", help="Output directory for videos")
    parser.add_argument("--fps", type=int, default=30)
    parser.add_argument("--image-keys", nargs="+", default=["agentview_image"],
                        help="Image observation keys to render side-by-side")
    parser.add_argument("--overlay-keys", nargs="*", default=[],
                        help="Low-dim obs keys to overlay as text on each frame")
    parser.add_argument("--target-size", type=int, nargs=2, default=[240, 680],
                        metavar=("H", "W"))
    return parser


def main() -> None:
    """CLI entry point: open the HDF5 file and render every demo to an MP4."""
    args = _build_arg_parser().parse_args()

    if not os.path.exists(args.file):
        print(f"File not found: {args.file}")
        return
    os.makedirs(args.out_dir, exist_ok=True)

    with h5py.File(args.file, "r") as h5f:
        # The synced dataset convention puts all demos under a "data" group.
        if "data" not in h5f:
            print("Invalid file: no top-level 'data' group.")
            return
        demo_names = sorted(h5f["data"].keys(), key=_sort_key)
        print(f"Found {len(demo_names)} demos.")
        for name in demo_names:
            visualize_demo(
                h5f["data"][name],
                name,
                args.out_dir,
                args.image_keys,
                args.fps,
                args.overlay_keys,
                tuple(args.target_size),
            )
    print("Done.")


if __name__ == "__main__":
    main()