|
|
|
|
|
""" |
|
|
解析 H5 文件并导出为 Hugging Face Dataset Viewer 兼容格式 |
|
|
|
|
|
用法: |
|
|
python extract_h5.py # 解析所有 H5 文件 |
|
|
python extract_h5.py --check # 仅检查 H5 文件结构(不解析) |
|
|
""" |
|
|
|
|
|
import argparse |
|
|
import h5py |
|
|
import numpy as np |
|
|
from pathlib import Path |
|
|
from PIL import Image |
|
|
from tqdm import tqdm |
|
|
import json |
|
|
from collections import defaultdict |
|
|
|
|
|
|
|
|
def check_h5_structure(base_dir):
    """Print the dataset structure of every H5 file under *base_dir*.

    For each ``*_h5`` folder: show the keys/shapes/dtypes of up to three
    sample files in detail, then aggregate the shapes observed for every
    key across all files and print per-folder and overall summaries.
    """
    # folder name -> dataset key -> set of shape strings seen
    schema = defaultdict(lambda: defaultdict(set))

    folders = sorted(d for d in base_dir.iterdir() if d.is_dir() and d.name.endswith('_h5'))

    for folder in folders:
        files = list(folder.rglob('*.h5'))
        print(f"\n{'='*60}")
        print(f"文件夹: {folder.name} ({len(files)} 个文件)")
        print('='*60)

        # Detailed preview limited to the first few files.
        for path in files[:3]:
            print(f"\n {path.name}:")
            with h5py.File(path, 'r') as f:
                for key in sorted(f.keys()):
                    ds = f[key]
                    print(f" - {key}: shape={ds.shape}, dtype={ds.dtype}")
                    schema[folder.name][key].add(str(ds.shape))

        # Full pass: record every distinct shape that occurs per key.
        print(f"\n 汇总 (检查全部 {len(files)} 个文件):")
        for path in files:
            with h5py.File(path, 'r') as f:
                for key in f.keys():
                    schema[folder.name][key].add(str(f[key].shape))

        for key, shapes in sorted(schema[folder.name].items()):
            print(f" - {key}: shapes={list(shapes)}")

    # Overall summary across every folder scanned.
    print(f"\n{'='*60}")
    print("总汇总 - 所有文件夹的字段:")
    print('='*60)
    for folder_name, keys in sorted(schema.items()):
        print(f"\n{folder_name}:")
        for key, shapes in sorted(keys.items()):
            print(f" - {key}: {list(shapes)}")
|
|
|
|
|
|
|
|
def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):
    """Extract a pose_data H5 file into per-frame records plus PNG images.

    Args:
        h5_path: path to the source ``.h5`` file.
        output_dir: directory under which an ``episode_id`` folder is created.
        episode_id: unique episode name (folder name and record field).
        subset_path: optional relative sub-folder prepended to image paths.

    Returns:
        list[dict]: one record per frame, containing relative image paths
        and the per-frame numeric data found in the file.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)

    # Relative prefix stored in the metadata so the Dataset Viewer can
    # resolve image files from the repository root.
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id

    records = []

    with h5py.File(h5_path, 'r') as f:
        keys = list(f.keys())
        # Frame count is driven by 'timestamps'; files without it yield no records.
        num_frames = len(f['timestamps'][:]) if 'timestamps' in keys else 0

        data_cache = {}   # numeric arrays and static image paths
        image_paths = {}  # per-frame image path lists, keyed by dataset name

        for key in keys:
            arr = f[key][:]

            if arr.dtype == np.uint8:
                if len(arr.shape) == 3:
                    # Single static image (H, W, C), e.g. a background frame.
                    filename = "bg.png"
                    Image.fromarray(arr).save(episode_dir / filename)
                    # BUGFIX: metadata path now references the saved file.
                    data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"

                elif len(arr.shape) == 4:
                    # One image per frame (T, H, W, C).
                    paths = []
                    for i, img in enumerate(arr):
                        filename = f"{key}_{i:04d}.png"
                        Image.fromarray(img).save(episode_dir / filename)
                        # BUGFIX: path now references the saved file.
                        paths.append(f"{rel_prefix}/{filename}")
                    image_paths[key] = paths

                elif len(arr.shape) == 5:
                    # Multi-view images per frame (T, V, H, W, C).
                    num_views = arr.shape[1]
                    paths = []
                    for frame_idx in range(arr.shape[0]):
                        frame_paths = []
                        for view_idx in range(num_views):
                            filename = f"{key}_f{frame_idx:04d}_v{view_idx}.png"
                            Image.fromarray(arr[frame_idx, view_idx]).save(episode_dir / filename)
                            # BUGFIX: path now references the saved file.
                            frame_paths.append(f"{rel_prefix}/{filename}")
                        paths.append(frame_paths)
                    image_paths[key] = paths
                    data_cache[f"{key}_num_views"] = num_views
            else:
                # Numeric data (timestamps, rotations, tactile readings, ...).
                data_cache[key] = arr.tolist()

        for frame_idx in range(num_frames):
            record = {
                "episode_id": episode_id,
                "frame_idx": frame_idx,
            }

            if subset_path:
                record["subset"] = subset_path

            # Per-frame image columns; the first view doubles as `file_name`
            # so the Dataset Viewer has a primary image to display.
            for key, paths in image_paths.items():
                if isinstance(paths[0], list):
                    for v_idx, p in enumerate(paths[frame_idx]):
                        if v_idx == 0:
                            record["file_name"] = p
                        record[f"image_v{v_idx}"] = p
                else:
                    record["file_name"] = paths[frame_idx]
                    record["image"] = paths[frame_idx]

            # Static per-episode metadata repeated in every record.
            for key, val in data_cache.items():
                if key.endswith("_image") or key.endswith("_num_views"):
                    record[key] = val

            # Known per-frame numeric fields (exported under singular names).
            # NOTE(review): assumes these arrays have at least num_frames
            # entries; num_frames comes from 'timestamps' — confirm the others.
            if 'timestamps' in data_cache:
                record["timestamp"] = data_cache['timestamps'][frame_idx]
            if 'rotations' in data_cache:
                record["rotation"] = data_cache['rotations'][frame_idx]
            if 'translations' in data_cache:
                record["translation"] = data_cache['translations'][frame_idx]
            if 'tactile' in data_cache:
                record["tactile"] = data_cache['tactile'][frame_idx]
            if 'xela' in data_cache:
                record["xela"] = data_cache['xela'][frame_idx]

            record["num_frames"] = num_frames
            records.append(record)

    return records
|
|
|
|
|
|
|
|
def extract_tacniq_gsmini(h5_path, output_dir, episode_id, subset_path=""):
    """Extract a tacniq_gsmini H5 file into per-frame records plus PNGs.

    Args:
        h5_path: path to the source ``.h5`` file (keys: bg, gsmini, tacniq).
        output_dir: directory under which an ``episode_id`` folder is created.
        episode_id: unique episode name (folder name and record field).
        subset_path: optional relative sub-folder prepended to image paths.

    Returns:
        list[dict]: one record per gsmini frame.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)

    # Relative prefix used in the exported metadata image paths.
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id

    records = []

    with h5py.File(h5_path, 'r') as f:
        bg = f['bg'][:]          # static background image
        gsmini = f['gsmini'][:]  # per-frame GelSight Mini images
        tacniq = f['tacniq'][:].tolist()  # per-frame tactile readings

        Image.fromarray(bg).save(episode_dir / "bg.png")
        bg_path = f"{rel_prefix}/bg.png"

        num_frames = len(gsmini)

        for frame_idx in range(num_frames):
            filename = f"gsmini_{frame_idx:04d}.png"
            Image.fromarray(gsmini[frame_idx]).save(episode_dir / filename)

            record = {
                "episode_id": episode_id,
                "frame_idx": frame_idx,
                # BUGFIX: paths now reference the image saved just above.
                "file_name": f"{rel_prefix}/{filename}",
                "image": f"{rel_prefix}/{filename}",
                "bg_image": bg_path,
                # tacniq may be shorter than the image stream; pad with None.
                "tacniq": tacniq[frame_idx] if frame_idx < len(tacniq) else None,
                "num_frames": num_frames,
            }

            if subset_path:
                record["subset"] = subset_path

            records.append(record)

    return records
|
|
|
|
|
|
|
|
def extract_xela_9dtact(h5_path, output_dir, episode_id, subset_path=""):
    """Extract a xela_9dtact H5 file into per-frame records plus PNGs.

    Args:
        h5_path: path to the source ``.h5`` file (keys: bg, 9dtact, xela).
        output_dir: directory under which an ``episode_id`` folder is created.
        episode_id: unique episode name (folder name and record field).
        subset_path: optional relative sub-folder prepended to image paths.

    Returns:
        list[dict]: one record per 9dtact frame.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)

    # Relative prefix used in the exported metadata image paths.
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id

    records = []

    with h5py.File(h5_path, 'r') as f:
        bg = f['bg'][:]          # static background image
        dtact = f['9dtact'][:]   # per-frame 9DTact images
        xela = f['xela'][:].tolist()  # per-frame XELA sensor readings

        Image.fromarray(bg).save(episode_dir / "bg.png")
        bg_path = f"{rel_prefix}/bg.png"

        num_frames = len(dtact)

        for frame_idx in range(num_frames):
            filename = f"9dtact_{frame_idx:04d}.png"
            Image.fromarray(dtact[frame_idx]).save(episode_dir / filename)

            record = {
                "episode_id": episode_id,
                "frame_idx": frame_idx,
                # BUGFIX: paths now reference the image saved just above.
                "file_name": f"{rel_prefix}/{filename}",
                "image": f"{rel_prefix}/{filename}",
                "bg_image": bg_path,
                # xela may be shorter than the image stream; pad with None.
                "xela": xela[frame_idx] if frame_idx < len(xela) else None,
                "num_frames": num_frames,
            }

            if subset_path:
                record["subset"] = subset_path

            records.append(record)

    return records
|
|
|
|
|
|
|
|
def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
    """Extract a force_data H5 file into per-frame records plus PNG images.

    Args:
        h5_path: path to the source ``.h5`` file.
        output_dir: directory under which an ``episode_id`` folder is created.
        episode_id: unique episode name (folder name and record field).
        subset_path: optional relative sub-folder prepended to image paths.

    Returns:
        list[dict]: one record per frame; frame count is the longest
        per-frame dataset found in the file.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)

    # Relative prefix used in the exported metadata image paths.
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id

    records = []

    with h5py.File(h5_path, 'r') as f:
        keys = list(f.keys())

        num_frames = 0
        data_cache = {}   # numeric arrays and static image paths
        image_paths = {}  # per-frame image path lists, keyed by dataset name

        for key in keys:
            arr = f[key][:]

            if arr.dtype == np.uint8:
                if len(arr.shape) == 3:
                    # Single static image (H, W, C).
                    filename = f"{key}.png"
                    Image.fromarray(arr).save(episode_dir / filename)
                    # BUGFIX: metadata path now references the saved file.
                    data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"
                elif len(arr.shape) == 4:
                    # One image per frame (T, H, W, C).
                    num_frames = max(num_frames, len(arr))
                    paths = []
                    for i, img in enumerate(arr):
                        filename = f"{key}_{i:04d}.png"
                        Image.fromarray(img).save(episode_dir / filename)
                        # BUGFIX: path now references the saved file.
                        paths.append(f"{rel_prefix}/{filename}")
                    image_paths[key] = paths
            else:
                # Numeric data; 0-d datasets carry no frame dimension.
                data_cache[key] = arr.tolist()
                if len(arr.shape) >= 1:
                    num_frames = max(num_frames, len(arr))

        for frame_idx in range(num_frames):
            record = {
                "episode_id": episode_id,
                "frame_idx": frame_idx,
                "num_frames": num_frames,
            }

            if subset_path:
                record["subset"] = subset_path

            # Per-frame image columns (guarded: streams may be shorter).
            for key, paths in image_paths.items():
                if frame_idx < len(paths):
                    record["file_name"] = paths[frame_idx]
                    record["image"] = paths[frame_idx]

            # Static image paths are repeated; per-frame numeric data is
            # sliced at this frame when available.
            for key, val in data_cache.items():
                if key.endswith("_image"):
                    record[key] = val
                elif isinstance(val, list) and frame_idx < len(val):
                    record[key] = val[frame_idx]

            records.append(record)

    return records
|
|
|
|
|
|
|
|
def extract_all(base_dir):
    """Extract every ``*_h5`` folder under *base_dir* into viewer-ready output.

    For each H5 folder a sibling output folder (name minus ``_h5``) is
    created, every episode is extracted by the matching extractor, a
    per-episode ``metadata.json`` is written, and a folder-level
    ``metadata.jsonl`` aggregates all records.
    """
    # (marker substring in the folder name, extraction function)
    extractors = (
        ('pose_data', extract_pose_data),
        ('tacniq_gsmini', extract_tacniq_gsmini),
        ('xela_9dtact', extract_xela_9dtact),
        ('force_data', extract_force_data),
    )

    for folder in (d for d in base_dir.iterdir() if d.is_dir() and d.name.endswith('_h5')):
        out_dir = base_dir / folder.name.replace('_h5', '')
        out_dir.mkdir(exist_ok=True)

        files = list(folder.rglob('*.h5'))
        print(f"\n处理 {folder.name}: {len(files)} 个文件 -> {out_dir.name}/")

        all_records = []

        for path in tqdm(files, desc=folder.name):
            rel = path.relative_to(folder)
            target = out_dir / rel.parent
            target.mkdir(parents=True, exist_ok=True)

            ep_id = path.stem
            subset = "" if rel.parent == Path('.') else str(rel.parent)

            # Pick the extractor whose marker appears in the folder name;
            # unrecognized folders are skipped.
            extractor = next((fn for marker, fn in extractors if marker in folder.name), None)
            if extractor is None:
                continue

            try:
                episode_records = extractor(path, target, ep_id, subset)
                all_records.extend(episode_records)

                # Per-episode metadata next to the exported images.
                json_path = target / ep_id / "metadata.json"
                with open(json_path, 'w') as fp:
                    json.dump(episode_records, fp, indent=2, ensure_ascii=False)
            except Exception as e:
                print(f"\nError: {path}: {e}")

        # Folder-level JSONL consumed by the HF Dataset Viewer.
        jsonl_path = out_dir / "metadata.jsonl"
        with open(jsonl_path, 'w') as fp:
            for record in all_records:
                fp.write(json.dumps(record, ensure_ascii=False) + '\n')

        print(f" 已生成 {len(all_records)} 条记录 -> {jsonl_path}")

    print("\n解析完成!")
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: check H5 file structure or run the full extraction."""
    parser = argparse.ArgumentParser(description="解析 H5 文件为 HuggingFace Dataset 格式")
    parser.add_argument('--check', action='store_true', help='仅检查 H5 文件结构(不解析)')
    opts = parser.parse_args()

    # All input/output paths are resolved relative to this script's folder.
    root = Path(__file__).parent

    # --check only inspects structure; the default runs the extraction.
    action = check_h5_structure if opts.check else extract_all
    action(root)
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|