"""
|
|
|
HDF5 分片生成脚本:读取 MP4 与 JSON,生成符合规范的 shard_XXXX.h5
|
|
|
|
|
|
层级设计(示例):
|
|
|
|
|
|
shard_XXXX.h5
|
|
|
├── /dataset_name_0/
|
|
|
│ ├── @dataset_source: "AgiBot World"
|
|
|
│ ├── @dataset_version: "alpha"
|
|
|
│ ├── @num_trajectories: <N>
|
|
|
│ │
|
|
|
│ ├── /traj_0000/
|
|
|
│ │ ├── @task: "Pickup items in the supermarket"
|
|
|
│ │ ├── @task_id: "327"
|
|
|
│ │ ├── @episode_id: "648642"
|
|
|
│ │ ├── @scene_id: <init_scene_text>
|
|
|
│ │ ├── @robot_type: "unknown"
|
|
|
│ │ ├── @success: 1
|
|
|
│ │ ├── @num_frames: T
|
|
|
│ │ ├── @fps: F
|
|
|
│ │ ├── @duration_sec: T/F
|
|
|
│ │ ├── @camera_views: ["head", "left", "right", ...]
|
|
|
│ │ │
|
|
|
│ │ ├── images_head: [T, H, W, 3] uint8
|
|
|
│ │ ├── images_left: [T, H, W, 3] uint8
|
|
|
│ │ ├── images_right: [T, H, W, 3] uint8
|
|
|
│ │ │
|
|
|
│ │ ├── progress: [T] float32
|
|
|
│ │ ├── done: [T] bool
|
|
|
│ │ └── value: [T] float32
|
|
|
|
|
|
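
Reading a generated shard (a minimal sketch; the file name and the presence of
images_head are assumptions based on the examples in this docstring):

    import h5py

    with h5py.File("shard_327_part_0000.h5", "r") as f:
        ds = f["agibot_world_0"]
        print(ds.attrs["num_trajectories"])
        traj = ds["traj_0000"]
        print(traj.attrs["task"], traj.attrs["num_frames"])
        frame0 = traj["images_head"][0]  # first frame: [H, W, 3] uint8, RGB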

Usage (example):

1) Install dependencies (Windows):
   pip install h5py numpy opencv-python

2) Run the script (use your segment directory as the root, e.g. 648642-684757):
   python build_h5_shard.py \
       --dataset-name agibot_world \
       --task-json e:/trae_code/20251111data/database/AgiBot_World/task_327.json \
       --obs-root e:/trae_code/20251111data/OpenDriveLab___AgiBot-World/raw/main/observations/327/648642-684757 \
       --task-id 327 \
       --output e:/trae_code/20251111data/shard_327.h5

3) Optional arguments:
   --dataset-source "AgiBot World" --dataset-version "alpha" --robot-type "franka"

The script looks for MP4 files under <obs-root>/<episode_id>/videos and applies a
fixed mapping: head_color → images_head, hand_left_color → images_left,
hand_right_color → images_right.
If obs-root points at a higher-level directory (e.g. observations), subdirectories
are also searched recursively for `<episode_id>/videos`.

Note: frames are streamed into the HDF5 file along the time axis, so a whole video
is never loaded into memory at once.

Sharding rules:
- At most 150 trajectories are written to a single H5 file (configurable via
  `--max-traj-per-shard`).
- When the limit is reached, a new H5 file is created automatically; its name is
  derived from `--output` by appending a `_part_XXXX` suffix.
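  For example, `--output shard_327.h5` produces shard_327_part_0000.h5,
  shard_327_part_0001.h5, and so on.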
"""

import argparse
import json
import os
import sys
from typing import Dict, List, Tuple

import h5py
import numpy as np

try:
    import cv2
except Exception:
    print("[ERROR] Missing dependency opencv-python; install it first: pip install opencv-python")
    raise


# Fixed camera streams and their mapping to view names and image dataset names.
CAMERA_ORDER = ["head_color", "hand_left_color", "hand_right_color"]
CAMERA_TO_VIEW = {"head_color": "head", "hand_left_color": "left", "hand_right_color": "right"}
CAMERA_TO_DSET = {"head_color": "images_head", "hand_left_color": "images_left", "hand_right_color": "images_right"}


def string_array(lst: List[str]) -> np.ndarray:
    """Convert a list of Python strings into an h5py-compatible UTF-8 string array."""
    dt = h5py.string_dtype(encoding="utf-8")
    return np.array(lst, dtype=dt)
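
# Example: string_array(["head", "left"]) produces an object array that h5py
# stores as variable-length UTF-8 strings (used below for @camera_views).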


def find_episode_videos(obs_root: str, task_id: int, episode_id: int) -> Dict[str, str]:
    """
    Look for MP4 files in <obs-root>/<episode_id>/videos, or in its subdirectories.

    Only the three fixed streams head_color, hand_left_color and hand_right_color
    are returned (when present). `task_id` is currently unused and is kept for
    interface symmetry.

    Returns: {raw_camera_key: mp4_path}
    """
    candidates: Dict[str, str] = {}

    # Fast path: <obs-root>/<episode_id>/videos exists directly under obs_root.
    direct_dir = os.path.join(obs_root, str(episode_id), "videos")
    if os.path.isdir(direct_dir):
        for fn in os.listdir(direct_dir):
            if fn.lower().endswith(".mp4"):
                key = os.path.splitext(fn)[0]
                candidates[key] = os.path.join(direct_dir, fn)

    # Fallback: walk obs_root until a matching <episode_id>/videos directory appears.
    if not candidates:
        for root, dirs, files in os.walk(obs_root):
            base = os.path.basename(root)
            if base == str(episode_id) and "videos" in dirs:
                vdir = os.path.join(root, "videos")
                for fn in os.listdir(vdir):
                    if fn.lower().endswith(".mp4"):
                        key = os.path.splitext(fn)[0]
                        candidates[key] = os.path.join(vdir, fn)
                break

    # Keep only the known camera streams, in a stable order.
    filtered: Dict[str, str] = {}
    for k in CAMERA_ORDER:
        if k in candidates:
            filtered[k] = candidates[k]
    return filtered
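
# For example (hypothetical paths), find_episode_videos("observations/327", 327, 648642)
# might return {"head_color": "observations/327/648642/videos/head_color.mp4", ...}.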


def read_video_meta(path: str) -> Tuple[int, int, int, int, float]:
    """Read basic video metadata: (frame_count, width, height, channels, fps). channels is fixed to 3."""
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        raise RuntimeError(f"Cannot open video: {path}")
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = float(cap.get(cv2.CAP_PROP_FPS) or 0.0)
    if fps <= 0:
        # Some containers report 0 fps; fall back to a conventional default.
        fps = 30.0
    cap.release()
    return frame_count, width, height, 3, fps
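
# e.g. read_video_meta("videos/head_color.mp4") -> (frame_count, width, height, 3, fps)
# (path illustrative). CAP_PROP_FRAME_COUNT can be approximate for some containers,
# which is one reason the writer below tolerates short reads.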


def write_video_slice_to_dataset(mp4_path: str, dset: h5py.Dataset, start_idx: int, count: int) -> int:
    """
    Stream the frame range [start_idx, start_idx + count) from an MP4 into dset,
    one frame at a time.

    Returns the number of frames actually written.
    """
    cap = cv2.VideoCapture(mp4_path)
    if not cap.isOpened():
        raise RuntimeError(f"Cannot open video: {mp4_path}")
    # Seek to the slice start; CAP_PROP_POS_FRAMES can be inexact for some codecs.
    cap.set(cv2.CAP_PROP_POS_FRAMES, max(0, int(start_idx)))
    t = 0
    while t < count:
        ok, frame_bgr = cap.read()
        if not ok:
            break
        frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)  # OpenCV decodes to BGR
        if frame_rgb.dtype != np.uint8:
            frame_rgb = frame_rgb.astype(np.uint8)
        dset[t, ...] = frame_rgb
        t += 1
    cap.release()
    if t < count:
        print(f"[WARN] {os.path.basename(mp4_path)} wrote only {t}/{count} frames (start={start_idx})")
    return t
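
# Typical use (names illustrative): preallocate a (n, h, w, 3) uint8 dataset and
# stream into it:
#   dset = traj_grp.create_dataset("images_head", shape=(n, h, w, 3), dtype=np.uint8)
#   write_video_slice_to_dataset("videos/head_color.mp4", dset, start_idx=0, count=n)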


def build_h5_shard(
    output_path: str,
    dataset_name: str,
    task_json_path: str,
    obs_root: str,
    task_id_filter: int,
    dataset_source: str = "AgiBot World",
    dataset_version: str = "alpha",
    default_robot_type: str = "unknown",
    max_traj_per_shard: int = 150,
) -> None:
    """Main pipeline: read the task JSON and MP4s, and write HDF5 shards."""
    with open(task_json_path, "r", encoding="utf-8") as f:
        episodes = json.load(f)
    if not isinstance(episodes, list):
        raise ValueError("task_json must contain a JSON list")

    # Collect episodes of the requested task that have both videos and action slices.
    ep_pool = []
    for ep in episodes:
        try:
            ep_id = int(ep.get("episode_id"))
            t_id = int(ep.get("task_id"))
        except Exception:
            continue
        if t_id != task_id_filter:
            continue
        vids = find_episode_videos(obs_root, task_id_filter, ep_id)
        if not vids:
            # No videos found for this episode; skip it.
            continue

        cam_metas = {}
        for k, mp4 in vids.items():
            fc, w, h, ch, fps = read_video_meta(mp4)
            cam_metas[k] = (fc, w, h, ch, fps, mp4)

        present_cams = [c for c in CAMERA_ORDER if c in cam_metas]
        view_names = [CAMERA_TO_VIEW[c] for c in present_cams]
        if present_cams:
            print(f"[FOUND] episode {ep_id} has camera views: {', '.join(view_names)}")
        actions = (ep.get("label_info") or {}).get("action_config", [])
        if not actions:
            print(f"[INFO] episode {ep_id} has no action_config, skipping")
            continue
        ep_pool.append((ep, vids, cam_metas, actions))

    if not ep_pool:
        raise RuntimeError("No episode with action slices was found; check the JSON and the directories.")

    # First pass: count the action slices that have at least one usable frame, so
    # the progress line below can report a meaningful total.
    total_actions_valid = 0
    for ep, vids, cam_metas, actions in ep_pool:
        present_cams = [c for c in CAMERA_ORDER if c in cam_metas]
        for act in actions:
            try:
                s = int(act.get("start_frame", 0))
                e = int(act.get("end_frame", 0))
            except Exception:
                continue
            per_cam_len = []
            for c in present_cams:
                total = cam_metas[c][0]
                if s >= total:
                    length = 0
                else:
                    length = max(0, min(e, total - 1) - s + 1)
                per_cam_len.append(length)
            slice_len = min(per_cam_len) if per_cam_len else 0
            if slice_len > 0:
                total_actions_valid += 1

    def _make_shard_path(base: str, idx: int) -> str:
        base = os.path.abspath(base)
        d = os.path.dirname(base)
        stem = os.path.splitext(os.path.basename(base))[0]
        return os.path.join(d, f"{stem}_part_{idx:04d}.h5")

    def _open_shard(idx: int):
        path = _make_shard_path(output_path, idx)
        h5 = h5py.File(path, "w")
        grp = h5.create_group(f"/{dataset_name}_0")
        grp.attrs["dataset_source"] = dataset_source
        grp.attrs["dataset_version"] = dataset_version
        print(f"[SHARD] writing shard {idx} -> {path}")
        return h5, grp, path

    shard_idx = 0
    h5, grp_dataset, current_shard_path = _open_shard(shard_idx)
    traj_count_in_shard = 0
    total_traj_written = 0
    processed_actions = 0

    try:
        for ep, vids, cam_metas, actions in ep_pool:
            ep_id = int(ep.get("episode_id"))
            scene_text = (ep.get("init_scene_text") or "")

            present_cams = [c for c in CAMERA_ORDER if c in cam_metas]
            view_names = [CAMERA_TO_VIEW[c] for c in present_cams]

            # Use the first present camera's fps as the reference frame rate.
            ref_fps = cam_metas[present_cams[0]][4] if present_cams else 30.0

            for aidx, act in enumerate(actions):
                try:
                    s = int(act.get("start_frame", 0))
                    e = int(act.get("end_frame", 0))
                except Exception:
                    continue
                action_text = (act.get("action_text") or "")
                skill = (act.get("skill") or "")  # read from the source JSON; currently unused

                # Clamp the slice to each camera's frame count and take the shortest.
                per_cam_len = []
                for c in present_cams:
                    total = cam_metas[c][0]
                    if s >= total:
                        length = 0
                    else:
                        length = max(0, min(e, total - 1) - s + 1)
                    per_cam_len.append(length)
                slice_len = min(per_cam_len) if per_cam_len else 0
                if slice_len <= 0:
                    print(f"[WARN] episode {ep_id} action[{aidx}]({s}-{e}) has no valid frames, skipping")
                    continue

                # One HDF5 trajectory group per action slice.
                traj_grp = grp_dataset.create_group(f"traj_{traj_count_in_shard:04d}")
                traj_grp.attrs["task"] = action_text
                # Disambiguate task_id per action slice within the source task.
                traj_grp.attrs["task_id"] = f"{task_id_filter}_act_{aidx:03d}"
                traj_grp.attrs["episode_id"] = str(ep_id)
                traj_grp.attrs["scene_id"] = scene_text
                traj_grp.attrs["robot_type"] = default_robot_type
                traj_grp.attrs["success"] = 1
                traj_grp.attrs["num_frames"] = int(slice_len)
                traj_grp.attrs["fps"] = float(ref_fps)
                traj_grp.attrs["duration_sec"] = float(slice_len) / float(ref_fps)
                traj_grp.attrs["camera_views"] = string_array(view_names)

                for c in present_cams:
                    _, w, h, _, _, mp4_path = cam_metas[c]
                    dname = CAMERA_TO_DSET[c]
                    # One chunk per frame keeps the streamed writes memory-friendly.
                    dset = traj_grp.create_dataset(
                        name=dname,
                        shape=(slice_len, h, w, 3),
                        dtype=np.uint8,
                        chunks=(1, h, w, 3),
                        compression="gzip",
                        compression_opts=4,
                    )
                    written = write_video_slice_to_dataset(mp4_path, dset, start_idx=s, count=slice_len)
                    # write_video_slice_to_dataset already warns when written < slice_len;
                    # any missing frames are left zero-filled.

                # Dense per-frame labels: linear progress, done on the last frame,
                # and a zero value placeholder.
                prog = np.linspace(0.0, 1.0, num=slice_len, dtype=np.float32)
                done = np.zeros((slice_len,), dtype=np.bool_)
                done[-1] = True
                value = np.zeros((slice_len,), dtype=np.float32)

                traj_grp.create_dataset("progress", data=prog, dtype=np.float32)
                traj_grp.create_dataset("done", data=done, dtype=np.bool_)
                traj_grp.create_dataset("value", data=value, dtype=np.float32)

                traj_count_in_shard += 1
                total_traj_written += 1
                processed_actions += 1

                sys.stdout.write(
                    f"\r[PROGRESS] wrote trajectory {processed_actions}/{total_actions_valid} (episode {ep_id}, action {aidx})"
                )
                sys.stdout.flush()

                # Roll over to a new shard once the per-file trajectory limit is hit.
                if traj_count_in_shard >= max_traj_per_shard:
                    grp_dataset.attrs["num_trajectories"] = traj_count_in_shard
                    h5.close()
                    shard_idx += 1
                    h5, grp_dataset, current_shard_path = _open_shard(shard_idx)
                    traj_count_in_shard = 0

        # Finalize the last (possibly partially filled) shard.
        grp_dataset.attrs["num_trajectories"] = traj_count_in_shard
        h5.close()

        if total_actions_valid > 0:
            sys.stdout.write("\n")
    finally:
        # Safety net: close the current shard even if an error occurred above.
        try:
            if h5 and h5.id:
                grp_dataset.attrs["num_trajectories"] = traj_count_in_shard
                h5.close()
        except Exception:
            pass

    print(f"✅ Done: wrote {total_traj_written} trajectories across {shard_idx + 1} shard(s)")


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="AgiBot World: MP4 + JSON → HDF5 shard builder")
    p.add_argument("--dataset-name", required=True, help="Top-level HDF5 dataset name prefix (e.g. droid, bridge, agibot_world)")
    p.add_argument("--task-json", required=True, help="Path to task_[id].json")
    p.add_argument("--obs-root", required=True, help="observations root directory (containing <task_id>/<episode_id>/videos)")
    p.add_argument("--task-id", type=int, required=True, help="Task ID (e.g. 327)")
    p.add_argument("--output", required=True, help="Base output HDF5 path (shards are written as _part_XXXX.h5)")
    p.add_argument("--max-traj-per-shard", type=int, default=150, help="Maximum trajectories per H5 shard (default 150)")
    p.add_argument("--dataset-source", default="AgiBot World", help="Value for the @dataset_source attribute")
    p.add_argument("--dataset-version", default="alpha", help="Value for the @dataset_version attribute")
    p.add_argument("--robot-type", default="unknown", help="Default value for the @robot_type attribute")
    return p.parse_args()


def main():
    args = parse_args()
    build_h5_shard(
        output_path=args.output,
        dataset_name=args.dataset_name,
        task_json_path=args.task_json,
        obs_root=args.obs_root,
        task_id_filter=args.task_id,
        dataset_source=args.dataset_source,
        dataset_version=args.dataset_version,
        default_robot_type=args.robot_type,
        max_traj_per_shard=args.max_traj_per_shard,
    )


if __name__ == "__main__":
    main()