|
|
|
|
|
""" |
|
|
数据集预处理统一入口 |
|
|
|
|
|
用法: |
|
|
python preprocess.py extract # 解析 H5 文件 |
|
|
python preprocess.py extract --check # 仅检查 H5 结构 |
|
|
python preprocess.py extract --update # 更新 metadata(添加热力图/视频路径) |
|
|
python preprocess.py heatmap # 生成热力图 |
|
|
python preprocess.py heatmap --test # 测试热力图生成 |
|
|
python preprocess.py marker_flow # 生成 xela marker flow 可视化 |
|
|
python preprocess.py marker_flow --test # 测试 marker flow 生成 |
|
|
python preprocess.py video # 生成视频 |
|
|
python preprocess.py video --test # 测试视频生成 |
|
|
python preprocess.py pack # 打包图像为 tar 文件 |
|
|
python preprocess.py pack --delete # 打包后删除原始图像 |
|
|
python preprocess.py unpack # 解压 tar 文件 |
|
|
python preprocess.py unpack --delete # 解压后删除 tar 文件 |
|
|
python preprocess.py clean # 删除所有 PNG,只保留视频 |
|
|
python preprocess.py upload # 上传到 Hugging Face |
|
|
python preprocess.py upload --sync # 同步上传(删除远端多余文件) |
|
|
python preprocess.py all # 完整流程(extract -> heatmap -> video -> update) |
|
|
""" |
|
|
|
|
|
import argparse |
|
|
import json |
|
|
import subprocess |
|
|
import tempfile |
|
|
import inspect |
|
|
from pathlib import Path |
|
|
from collections import defaultdict |
|
|
|
|
|
import h5py |
|
|
import numpy as np |
|
|
from PIL import Image |
|
|
from tqdm import tqdm |
|
|
import matplotlib |
|
|
matplotlib.use('Agg') |
|
|
import matplotlib.pyplot as plt |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Dataset root: the *_h5 input folders and extracted output folders live
# next to this script.
BASE_DIR = Path(__file__).parent

# Color-scale bounds used by save_tactile_heatmap (tac02 / tacniq sensors).
TACTILE_VMIN = 15
TACTILE_VMAX = 750
TACTILE_CMAP = 'plasma'

# Color-scale bounds used by save_xela_heatmap / save_xela_marker_flow
# (signed xela force values; diverging colormap centered at zero).
XELA_VMIN = -5
XELA_VMAX = 5
XELA_CMAP = 'RdBu_r'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def save_tactile_heatmap(data, output_path, rows=11, cols=6):
    """Render a tactile reading as a heatmap PNG.

    A 1-D reading of exactly rows*cols values is shown as a (rows, cols)
    grid; any other 1-D length is drawn as a single row. 2-D input is
    drawn as-is. Color scale is fixed by TACTILE_VMIN/VMAX.
    """
    grid = np.array(data)
    if grid.ndim == 1:
        if grid.size == rows * cols:
            grid = grid.reshape(rows, cols)
        else:
            grid = grid.reshape(1, -1)

    fig, ax = plt.subplots(figsize=(cols * 0.5, rows * 0.5))
    ax.imshow(grid, cmap=TACTILE_CMAP, aspect='equal', interpolation='nearest',
              vmin=TACTILE_VMIN, vmax=TACTILE_VMAX)
    ax.axis('off')
    plt.savefig(output_path, dpi=80, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
|
|
|
|
|
|
|
|
def save_xela_heatmap(data, output_path):
    """Render a xela force reading: Fz heatmap overlaid with Fx/Fy arrows.

    A 72-element reading is interpreted as a 4x6 grid of (fx, fy, fz)
    taxels. Any other length falls back to a plain one-row heatmap.
    """
    forces = np.array(data)

    if len(forces) != 72:
        # Fallback: unknown layout, show raw values in a single strip.
        fig, ax = plt.subplots(figsize=(6, 1))
        ax.imshow(forces.reshape(1, -1), cmap=XELA_CMAP, aspect='auto',
                  vmin=XELA_VMIN, vmax=XELA_VMAX)
        ax.axis('off')
        plt.savefig(output_path, dpi=80, bbox_inches='tight', pad_inches=0)
        plt.close(fig)
        return

    grid = forces.reshape(4, 6, 3)
    fx = grid[..., 0]
    fy = grid[..., 1]
    fz = grid[..., 2]

    fig, ax = plt.subplots(figsize=(4, 3))
    ax.imshow(fz, cmap=XELA_CMAP, aspect='equal', interpolation='nearest',
              vmin=XELA_VMIN, vmax=XELA_VMAX)

    n_rows, n_cols = 4, 6
    yy, xx = np.mgrid[0:n_rows, 0:n_cols]
    shear = np.sqrt(fx**2 + fy**2)
    peak = shear.max() if shear.max() > 0 else 1
    arrow_scale = 0.4 / peak  # longest arrow spans 0.4 cell widths

    # fy is negated because image rows grow downward.
    ax.quiver(xx, yy, fx * arrow_scale, -fy * arrow_scale,
              color='black', scale=1, scale_units='xy',
              width=0.02, headwidth=3, headlength=2)
    ax.axis('off')
    plt.savefig(output_path, dpi=100, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
|
|
|
|
|
|
|
|
def save_xela_marker_flow(data, output_path):
    """Render a xela reading as a marker-flow image.

    - Each taxel is a dot displaced from its rest position by the XY
      shear force (same direction convention as the quiver heatmap).
    - Z force sets dot size and color (red = positive, blue = negative).
    Readings that are not 72 elements long are skipped silently.
    """
    forces = np.array(data)
    if len(forces) != 72:
        return

    grid = forces.reshape(4, 6, 3)
    fx = grid[..., 0]
    fy = grid[..., 1]
    fz = grid[..., 2]

    shear = np.sqrt(fx**2 + fy**2)
    peak = shear.max() if shear.max() > 0 else 1
    disp_scale = 0.4 / peak  # largest displacement spans 0.4 cell widths

    n_rows, n_cols = 4, 6
    fig, ax = plt.subplots(figsize=(6, 4))

    # Light gray background canvas.
    ax.imshow(np.ones((n_rows, n_cols)) * 0.95, cmap='gray', vmin=0, vmax=1, aspect='equal')

    # Rest positions of all markers.
    for r in range(n_rows):
        for c in range(n_cols):
            ax.plot(c, r, 'o', color='#cccccc', markersize=8)

    for r in range(n_rows):
        for c in range(n_cols):
            # Displaced position (y negated: image rows grow downward).
            px = c + fx[r, c] * disp_scale
            py = r + (-fy[r, c]) * disp_scale

            # Trace from rest to displaced position.
            ax.plot([c, px], [r, py], '-', color='#888888', linewidth=1, alpha=0.5)

            # Dot size from |Fz|, clamped to [6, 25].
            dot = 8 + (abs(fz[r, c]) / XELA_VMAX) * 15
            dot = min(max(dot, 6), 25)

            dot_color = '#e74c3c' if fz[r, c] > 0 else '#3498db'
            ax.plot(px, py, 'o', color=dot_color, markersize=dot,
                    markeredgecolor='white', markeredgewidth=0.5)

    ax.axis('off')
    plt.savefig(output_path, dpi=100, bbox_inches='tight', pad_inches=0.1)
    plt.close(fig)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def check_h5_structure():
    """Print the dataset layout of every *_h5 folder under BASE_DIR.

    The first 3 files of each folder get a per-key dump (shape/dtype);
    then every file is scanned so the summary lists all shapes seen per key.
    """
    folder_keys = defaultdict(lambda: defaultdict(set))
    h5_folders = [d for d in BASE_DIR.iterdir() if d.is_dir() and d.name.endswith('_h5')]

    for h5_folder in sorted(h5_folders):
        h5_files = list(h5_folder.rglob('*.h5'))
        print(f"\n{'='*60}\n文件夹: {h5_folder.name} ({len(h5_files)} 个文件)\n{'='*60}")

        shapes_by_key = folder_keys[h5_folder.name]

        # Detailed dump for a small sample of files.
        for h5_path in h5_files[:3]:
            print(f"\n {h5_path.name}:")
            with h5py.File(h5_path, 'r') as f:
                for key in sorted(f.keys()):
                    dset = f[key]
                    print(f" - {key}: shape={dset.shape}, dtype={dset.dtype}")
                    shapes_by_key[key].add(str(dset.shape))

        # Shape census over every file (set dedupes the sample above).
        for h5_path in h5_files:
            with h5py.File(h5_path, 'r') as f:
                for key in f.keys():
                    shapes_by_key[key].add(str(f[key].shape))

        print(f"\n 汇总:")
        for key, shapes in sorted(shapes_by_key.items()):
            print(f" - {key}: {list(shapes)}")
|
|
|
|
|
|
|
|
def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):
    """Extract one pose_data episode: write PNGs and build per-frame records.

    uint8 datasets are treated as images and written under
    output_dir/episode_id:
      - 3-D (H, W, C)        -> single background image "bg.png"
      - 4-D (T, H, W, C)     -> one PNG per frame
      - 5-D (T, S, H, W, C)  -> one PNG per (frame, sample)
    All other datasets are kept in memory and emitted per-frame
    (timestamps/rotations/translations/tactile/xela).

    Returns a list of per-frame record dicts with repo-relative image paths.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id
    records = []

    with h5py.File(h5_path, 'r') as f:
        keys = list(f.keys())
        num_frames = len(f['timestamps'][:]) if 'timestamps' in keys else 0
        data_cache = {}
        image_paths = {}

        for key in keys:
            arr = f[key][:]
            if arr.dtype == np.uint8:
                if len(arr.shape) == 3:
                    # Single background image.
                    filename = "bg.png"
                    Image.fromarray(arr).save(episode_dir / filename)
                    # BUG FIX: the recorded path previously lost the file
                    # name ("(unknown)" placeholder); use the real one.
                    data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"
                elif len(arr.shape) == 4:
                    # Per-frame image sequence.
                    paths = []
                    for i, img in enumerate(arr):
                        filename = f"{key}_{i:04d}.png"
                        Image.fromarray(img).save(episode_dir / filename)
                        paths.append(f"{rel_prefix}/{filename}")  # BUG FIX: real filename
                    image_paths[key] = paths
                elif len(arr.shape) == 5:
                    # Multi-sample sequence: S sample images per frame.
                    num_samples = arr.shape[1]
                    paths = []
                    for frame_idx in range(arr.shape[0]):
                        frame_paths = []
                        for sample_idx in range(num_samples):
                            filename = f"{key}_f{frame_idx:04d}_s{sample_idx}.png"
                            Image.fromarray(arr[frame_idx, sample_idx]).save(episode_dir / filename)
                            frame_paths.append(f"{rel_prefix}/{filename}")  # BUG FIX: real filename
                        paths.append(frame_paths)
                    image_paths[key] = paths
                    data_cache[f"{key}_num_samples"] = num_samples
            else:
                # Numeric data: keep whole sequence, sliced per frame below.
                data_cache[key] = arr.tolist()

        for frame_idx in range(num_frames):
            record = {"episode_id": episode_id, "frame_idx": frame_idx}
            if subset_path:
                record["subset"] = subset_path

            for key, paths in image_paths.items():
                # Robustness: guard against image sequences shorter than
                # the timestamp count (mirrors extract_force_data).
                if frame_idx >= len(paths):
                    continue
                if isinstance(paths[0], list):
                    # Multi-sample frame: first sample doubles as file_name.
                    for s_idx, p in enumerate(paths[frame_idx]):
                        if s_idx == 0:
                            record["file_name"] = p
                        record[f"image_s{s_idx}"] = p
                else:
                    record["file_name"] = paths[frame_idx]

            # Episode-constant metadata (bg path, sample counts).
            for key, val in data_cache.items():
                if key.endswith("_image") or key.endswith("_num_samples"):
                    record[key] = val

            if 'timestamps' in data_cache:
                record["timestamp"] = data_cache['timestamps'][frame_idx]
            if 'rotations' in data_cache:
                record["rotation"] = data_cache['rotations'][frame_idx]
            if 'translations' in data_cache:
                record["translation"] = data_cache['translations'][frame_idx]
            if 'tactile' in data_cache:
                record["tactile"] = data_cache['tactile'][frame_idx]
            if 'xela' in data_cache:
                record["xela"] = data_cache['xela'][frame_idx]

            record["num_frames"] = num_frames
            records.append(record)

    return records
|
|
|
|
|
|
|
|
def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
    """Extract one force_data episode: write PNGs and build per-frame records.

    uint8 datasets become images under output_dir/episode_id:
      - 3-D (H, W, C)    -> single "{key}.png"
      - 4-D (T, H, W, C) -> one PNG per frame
    Other datasets are kept in memory; num_frames is the longest sequence
    seen. Per-frame numeric values are sliced into each record.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id
    records = []

    with h5py.File(h5_path, 'r') as f:
        keys = list(f.keys())
        num_frames = 0
        data_cache = {}
        image_paths = {}

        for key in keys:
            arr = f[key][:]
            if arr.dtype == np.uint8:
                if len(arr.shape) == 3:
                    filename = f"{key}.png"
                    Image.fromarray(arr).save(episode_dir / filename)
                    # BUG FIX: path previously lost the file name
                    # ("(unknown)" placeholder); record the real one.
                    data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"
                elif len(arr.shape) == 4:
                    num_frames = max(num_frames, len(arr))
                    paths = []
                    for i, img in enumerate(arr):
                        filename = f"{key}_{i:04d}.png"
                        Image.fromarray(img).save(episode_dir / filename)
                        paths.append(f"{rel_prefix}/{filename}")  # BUG FIX: real filename
                    image_paths[key] = paths
            else:
                data_cache[key] = arr.tolist()
                if len(arr.shape) >= 1:
                    num_frames = max(num_frames, len(arr))

        for frame_idx in range(num_frames):
            record = {"episode_id": episode_id, "frame_idx": frame_idx, "num_frames": num_frames}
            if subset_path:
                record["subset"] = subset_path

            for key, paths in image_paths.items():
                if frame_idx < len(paths):
                    record["file_name"] = paths[frame_idx]

            for key, val in data_cache.items():
                if key.endswith("_image"):
                    # Episode-constant image path (e.g. background).
                    record[key] = val
                elif isinstance(val, list) and frame_idx < len(val):
                    record[key] = val[frame_idx]

            records.append(record)

    return records
|
|
|
|
|
|
|
|
def extract_tacniq_gsmini(h5_path, output_dir, episode_id, subset_path=""):
    """Extract one tacniq_gsmini episode.

    Writes the background image and one gsmini PNG per frame under
    output_dir/episode_id, and returns per-frame records that pair each
    gsmini image with its tacniq reading.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)
    gsmini_dir = episode_dir / "gsmini"
    gsmini_dir.mkdir(parents=True, exist_ok=True)
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id

    with h5py.File(h5_path, 'r') as f:
        bg = f['bg'][:]
        gsmini = f['gsmini'][:]
        tacniq = f['tacniq'][:].tolist()

    Image.fromarray(bg).save(episode_dir / "bg.png")
    num_frames = len(gsmini)

    records = []
    for frame_idx in range(num_frames):
        gsmini_filename = f"frame_{frame_idx:04d}.png"
        Image.fromarray(gsmini[frame_idx]).save(gsmini_dir / gsmini_filename)

        records.append({
            "episode_id": episode_id,
            "frame_idx": frame_idx,
            "file_name": f"{rel_prefix}/gsmini/{gsmini_filename}",
            "gsmini_image": f"{rel_prefix}/gsmini/{gsmini_filename}",
            "bg_image": f"{rel_prefix}/bg.png",
            # tacniq may be shorter than the image sequence; pad with None.
            "tacniq": tacniq[frame_idx] if frame_idx < len(tacniq) else None,
            "num_frames": num_frames,
            "subset": subset_path if subset_path else None,
        })

    return records
|
|
|
|
|
|
|
|
def extract_xela_9dtact(h5_path, output_dir, episode_id, subset_path=""):
    """Extract one xela_9dtact episode.

    Writes the background image and one 9dtact PNG per frame under
    output_dir/episode_id, and returns per-frame records that pair each
    9dtact image with its xela reading.
    """
    episode_dir = output_dir / episode_id
    episode_dir.mkdir(parents=True, exist_ok=True)
    dtact_dir = episode_dir / "9dtact"
    dtact_dir.mkdir(parents=True, exist_ok=True)
    rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id

    with h5py.File(h5_path, 'r') as f:
        bg = f['bg'][:]
        dtact = f['9dtact'][:]
        xela = f['xela'][:].tolist()

    Image.fromarray(bg).save(episode_dir / "bg.png")
    num_frames = len(dtact)

    records = []
    for frame_idx in range(num_frames):
        dtact_filename = f"frame_{frame_idx:04d}.png"
        Image.fromarray(dtact[frame_idx]).save(dtact_dir / dtact_filename)

        records.append({
            "episode_id": episode_id,
            "frame_idx": frame_idx,
            "file_name": f"{rel_prefix}/9dtact/{dtact_filename}",
            "dtact_image": f"{rel_prefix}/9dtact/{dtact_filename}",
            "bg_image": f"{rel_prefix}/bg.png",
            # xela may be shorter than the image sequence; pad with None.
            "xela": xela[frame_idx] if frame_idx < len(xela) else None,
            "num_frames": num_frames,
            "subset": subset_path if subset_path else None,
        })

    return records
|
|
|
|
|
|
|
|
def extract_all():
    """Extract every *_h5 folder under BASE_DIR.

    Dispatches each .h5 file to the matching extractor, writes a
    per-episode metadata.json, and aggregates everything into one
    metadata.jsonl per output folder.
    """
    h5_folders = [d for d in BASE_DIR.iterdir() if d.is_dir() and d.name.endswith('_h5')]

    # Dispatch table; checked in order, first substring match wins.
    extractors = (
        ('pose_data', extract_pose_data),
        ('tacniq_gsmini', extract_tacniq_gsmini),
        ('xela_9dtact', extract_xela_9dtact),
        ('force_data', extract_force_data),
    )

    for h5_folder in h5_folders:
        output_folder = BASE_DIR / h5_folder.name.replace('_h5', '')
        output_folder.mkdir(exist_ok=True)

        h5_files = list(h5_folder.rglob('*.h5'))
        print(f"\n解析 {h5_folder.name}: {len(h5_files)} 个文件")

        extractor = next((fn for marker, fn in extractors if marker in h5_folder.name), None)

        all_records = []

        for h5_path in tqdm(h5_files, desc=h5_folder.name):
            if extractor is None:
                continue

            relative = h5_path.relative_to(h5_folder)
            sub_output_dir = output_folder / relative.parent
            sub_output_dir.mkdir(parents=True, exist_ok=True)

            episode_id = h5_path.stem
            subset_path = str(relative.parent) if relative.parent != Path('.') else ""

            try:
                records = extractor(h5_path, sub_output_dir, episode_id, subset_path)
                all_records.extend(records)

                episode_dir = sub_output_dir / episode_id
                with open(episode_dir / "metadata.json", 'w') as f:
                    json.dump(records, f, indent=2, ensure_ascii=False)
            except Exception as e:
                # Best-effort: report and keep going with remaining files.
                print(f"\nError: {h5_path}: {e}")

        with open(output_folder / "metadata.jsonl", 'w') as f:
            for record in all_records:
                f.write(json.dumps(record, ensure_ascii=False) + '\n')

        print(f" 生成 {len(all_records)} 条记录")
|
|
|
|
|
|
|
|
def update_metadata():
    """Update every metadata.json with heatmap and video paths, then
    regenerate each folder's metadata.jsonl.  (Original: 更新 metadata,
    添加热力图/视频路径.)"""
    data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']
    updated_count = 0

    for folder_name in data_folders:
        folder = BASE_DIR / folder_name
        if not folder.exists():
            continue

        json_files = list(folder.rglob('metadata.json'))
        print(f"\n更新 {folder_name}: {len(json_files)} 个文件")

        for json_path in tqdm(json_files, desc=folder_name):
            episode_dir = json_path.parent
            # Paths stored in records are relative to BASE_DIR.
            rel_prefix = str(episode_dir.relative_to(BASE_DIR))

            with open(json_path, 'r') as f:
                records = json.load(f)

            modified = False

            for record in records:
                frame_idx = record.get('frame_idx', 0)

                # Drop redundant 'image' field when it duplicates file_name.
                if 'image' in record and 'file_name' in record:
                    if record['image'] == record['file_name']:
                        del record['image']
                        modified = True

                # Multi-sample heatmaps: scan sample indices until the
                # file for a prefix is missing.
                for s_idx in range(100):
                    for prefix, key_prefix in [('tactile', 'tactile_heatmap'), ('xela', 'xela_heatmap')]:
                        heatmap_file = episode_dir / f"{prefix}_f{frame_idx:04d}_s{s_idx:02d}.png"
                        if heatmap_file.exists():
                            key = f"{key_prefix}_s{s_idx:02d}"
                            new_path = f"{rel_prefix}/{prefix}_f{frame_idx:04d}_s{s_idx:02d}.png"
                            if record.get(key) != new_path:
                                record[key] = new_path
                                modified = True
                        else:
                            # NOTE(review): this break exits only the
                            # prefix loop, not the s_idx scan — when the
                            # 'tactile' file is absent the 'xela' file of
                            # the same sample is never checked. Looks like
                            # it was meant to stop the sample scan; confirm
                            # intent before changing.
                            break

                # Single-frame force-data heatmaps: {prefix}_{frame:04d}.png.
                for prefix in ['tac02', 'xela']:
                    heatmap_file = episode_dir / f"{prefix}_{frame_idx:04d}.png"
                    if heatmap_file.exists():
                        key = f"{prefix}_heatmap"
                        new_path = f"{rel_prefix}/{prefix}_{frame_idx:04d}.png"
                        if record.get(key) != new_path:
                            record[key] = new_path
                            modified = True

                # Heatmaps stored in a per-sensor subdirectory.
                for subdir, key in [('tacniq', 'tacniq_heatmap'), ('xela', 'xela_heatmap')]:
                    heatmap_file = episode_dir / subdir / f"heatmap_{frame_idx:04d}.png"
                    if heatmap_file.exists():
                        new_path = f"{rel_prefix}/{subdir}/heatmap_{frame_idx:04d}.png"
                        if record.get(key) != new_path:
                            record[key] = new_path
                            modified = True

            # Episode-level videos: stamp each video's path into every
            # record of this episode (key = video file stem).
            for video_file in episode_dir.glob('video*.mp4'):
                video_key = video_file.stem
                video_path = f"{rel_prefix}/{video_file.name}"
                for record in records:
                    if record.get(video_key) != video_path:
                        record[video_key] = video_path
                        modified = True

            # Rewrite only when something actually changed.
            if modified:
                with open(json_path, 'w') as f:
                    json.dump(records, f, indent=2, ensure_ascii=False)
                updated_count += 1

    print(f"\n更新 {updated_count} 个文件")

    # Rebuild the aggregated JSONL for each folder from the (possibly
    # updated) per-episode metadata.json files.
    print("\n重新生成 JSONL...")
    for folder_name in data_folders:
        folder = BASE_DIR / folder_name
        if not folder.exists():
            continue

        all_records = []
        for json_path in folder.rglob('metadata.json'):
            with open(json_path, 'r') as f:
                all_records.extend(json.load(f))

        if all_records:
            with open(folder / "metadata.jsonl", 'w') as f:
                for record in all_records:
                    f.write(json.dumps(record, ensure_ascii=False) + '\n')
            print(f" {folder_name}: {len(all_records)} 条记录")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_heatmaps(data_type='all', test_only=False):
    """Generate heatmap PNGs for the selected dataset families.

    data_type: 'all', a group ('pose', 'force'), or a specific family
    such as 'tac02_pose' / 'xela_force' / 'tacniq_gsmini' / 'xela_9dtact'.
    test_only: process a single episode/record per family and stop early.
    """

    def process_tac02_pose():
        # pose_data/tac02_pose_h5: one heatmap per (frame, sample)
        # from the 'tactile' field of each record.
        data_dir = BASE_DIR / 'pose_data' / 'tac02_pose_h5'
        if not data_dir.exists():
            return
        print(f"\n处理 tac02_pose_h5...")
        episode_dirs = list(data_dir.iterdir())
        if test_only:
            episode_dirs = episode_dirs[:1]

        for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="tac02_pose"):
            json_path = episode_dir / 'metadata.json'
            if not json_path.exists():
                continue
            with open(json_path, 'r') as f:
                records = json.load(f)

            for record in (records[:1] if test_only else records):
                if 'tactile' not in record or record['tactile'] is None:
                    continue
                frame_idx = record['frame_idx']
                tactile = record['tactile']

                # Multi-sample frame: tactile is a list of per-sample
                # readings. NOTE(review): non-list (single-sample) tactile
                # values are silently skipped here — confirm intended.
                if isinstance(tactile[0], list):
                    for s_idx, sample in enumerate(tactile):
                        output_path = episode_dir / f"tactile_f{frame_idx:04d}_s{s_idx:02d}.png"
                        save_tactile_heatmap(sample, output_path)
                    if test_only:
                        print(f" 生成 {len(tactile)} 个热力图")
                        return

    def process_xela_pose():
        # pose_data/xela_pose_h5: same layout as tac02_pose but for 'xela'.
        data_dir = BASE_DIR / 'pose_data' / 'xela_pose_h5'
        if not data_dir.exists():
            return
        print(f"\n处理 xela_pose_h5...")
        episode_dirs = list(data_dir.iterdir())
        if test_only:
            episode_dirs = episode_dirs[:1]

        for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_pose"):
            json_path = episode_dir / 'metadata.json'
            if not json_path.exists():
                continue
            with open(json_path, 'r') as f:
                records = json.load(f)

            for record in (records[:1] if test_only else records):
                if 'xela' not in record or record['xela'] is None:
                    continue
                frame_idx = record['frame_idx']
                xela = record['xela']

                if isinstance(xela[0], list):
                    for s_idx, sample in enumerate(xela):
                        output_path = episode_dir / f"xela_f{frame_idx:04d}_s{s_idx:02d}.png"
                        save_xela_heatmap(sample, output_path)
                    if test_only:
                        print(f" 生成 {len(xela)} 个热力图")
                        return

    def process_force_data(sensor_type=None):
        # force_data/*: one heatmap per frame; subset folder name decides
        # which sensor ('tac02' or 'xela') and which saver to use.
        force_dir = BASE_DIR / 'force_data'
        if not force_dir.exists():
            return

        for subset_dir in force_dir.iterdir():
            if not subset_dir.is_dir():
                continue

            if 'tac02' in subset_dir.name:
                if sensor_type and sensor_type != 'tac02':
                    continue
                data_key, prefix = 'tac02', 'tac02'
            elif 'xela' in subset_dir.name:
                if sensor_type and sensor_type != 'xela':
                    continue
                data_key, prefix = 'xela', 'xela'
            else:
                continue

            print(f"\n处理 {subset_dir.name}...")
            episode_dirs = list(subset_dir.iterdir())
            if test_only:
                episode_dirs = episode_dirs[:1]

            for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc=subset_dir.name):
                json_path = episode_dir / 'metadata.json'
                if not json_path.exists():
                    continue
                with open(json_path, 'r') as f:
                    records = json.load(f)

                for record in (records[:1] if test_only else records):
                    if data_key not in record or record[data_key] is None:
                        continue
                    frame_idx = record['frame_idx']
                    heatmap_path = episode_dir / f"{prefix}_{frame_idx:04d}.png"
                    if prefix == 'tac02':
                        save_tactile_heatmap(record[data_key], heatmap_path)
                    else:
                        save_xela_heatmap(record[data_key], heatmap_path)
                    if test_only:
                        print(f" 生成: {heatmap_path}")
                        return

    def process_tacniq_gsmini():
        # tacniq_gsmini: heatmaps go into a 'tacniq' subdirectory.
        data_dir = BASE_DIR / 'tacniq_gsmini'
        if not data_dir.exists():
            return
        print(f"\n处理 tacniq_gsmini...")
        episode_dirs = list(data_dir.iterdir())
        if test_only:
            episode_dirs = episode_dirs[:1]

        for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="tacniq_gsmini"):
            json_path = episode_dir / 'metadata.json'
            if not json_path.exists():
                continue

            tacniq_dir = episode_dir / 'tacniq'
            tacniq_dir.mkdir(parents=True, exist_ok=True)

            with open(json_path, 'r') as f:
                records = json.load(f)

            for record in (records[:1] if test_only else records):
                if 'tacniq' not in record or record['tacniq'] is None:
                    continue
                frame_idx = record['frame_idx']
                heatmap_path = tacniq_dir / f"heatmap_{frame_idx:04d}.png"
                save_tactile_heatmap(record['tacniq'], heatmap_path)
                if test_only:
                    print(f" 生成: {heatmap_path}")
                    return

    def process_xela_9dtact():
        # xela_9dtact: heatmaps go into an 'xela' subdirectory.
        data_dir = BASE_DIR / 'xela_9dtact'
        if not data_dir.exists():
            return
        print(f"\n处理 xela_9dtact...")
        episode_dirs = list(data_dir.iterdir())
        if test_only:
            episode_dirs = episode_dirs[:1]

        for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_9dtact"):
            json_path = episode_dir / 'metadata.json'
            if not json_path.exists():
                continue

            xela_dir = episode_dir / 'xela'
            xela_dir.mkdir(parents=True, exist_ok=True)

            with open(json_path, 'r') as f:
                records = json.load(f)

            for record in (records[:1] if test_only else records):
                if 'xela' not in record or record['xela'] is None:
                    continue
                frame_idx = record['frame_idx']
                heatmap_path = xela_dir / f"heatmap_{frame_idx:04d}.png"
                save_xela_heatmap(record['xela'], heatmap_path)
                if test_only:
                    print(f" 生成: {heatmap_path}")
                    return

    # Dispatch: a family runs when data_type names it, its group, or 'all'.
    t = data_type
    if t in ['tac02_pose', 'pose', 'all']:
        process_tac02_pose()
    if t in ['xela_pose', 'pose', 'all']:
        process_xela_pose()
    if t in ['tac02_force', 'force', 'all']:
        process_force_data('tac02')
    if t in ['xela_force', 'force', 'all']:
        process_force_data('xela')
    if t in ['tacniq_gsmini', 'all']:
        process_tacniq_gsmini()
    if t in ['xela_9dtact', 'all']:
        process_xela_9dtact()
|
|
|
|
|
|
|
|
def generate_marker_flow(data_type='all', test_only=False):
    """Generate xela marker-flow PNGs for the selected dataset families.

    data_type: 'all', a group ('pose', 'force'), or a specific family.
    test_only: process a single episode/record per family and stop early.
    """

    def process_xela_pose():
        # pose_data/xela_pose_h5: flow images into a 'marker_flow'
        # subdirectory; multi-sample and single-sample records supported.
        data_dir = BASE_DIR / 'pose_data' / 'xela_pose_h5'
        if not data_dir.exists():
            return
        print(f"\n生成 xela_pose marker flow...")
        episode_dirs = list(data_dir.iterdir())
        if test_only:
            episode_dirs = episode_dirs[:1]

        for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_pose"):
            json_path = episode_dir / 'metadata.json'
            if not json_path.exists():
                continue

            flow_dir = episode_dir / 'marker_flow'
            flow_dir.mkdir(parents=True, exist_ok=True)

            with open(json_path, 'r') as f:
                records = json.load(f)

            for record in (records[:1] if test_only else records):
                if 'xela' not in record or record['xela'] is None:
                    continue
                frame_idx = record['frame_idx']
                xela = record['xela']

                if isinstance(xela[0], list):
                    # Multi-sample frame: one flow image per sample.
                    for s_idx, sample in enumerate(xela):
                        output_path = flow_dir / f"flow_f{frame_idx:04d}_s{s_idx:02d}.png"
                        save_xela_marker_flow(sample, output_path)
                    if test_only:
                        print(f" 生成 {len(xela)} 个 marker flow")
                        return
                else:
                    # Single reading per frame.
                    output_path = flow_dir / f"flow_{frame_idx:04d}.png"
                    save_xela_marker_flow(xela, output_path)
                    if test_only:
                        print(f" 生成: {output_path}")
                        return

    def process_xela_force():
        # force_data/*xela*: one flow image per frame.
        force_dir = BASE_DIR / 'force_data'
        if not force_dir.exists():
            return

        for subset_dir in force_dir.iterdir():
            if not subset_dir.is_dir() or 'xela' not in subset_dir.name:
                continue

            print(f"\n生成 {subset_dir.name} marker flow...")
            episode_dirs = list(subset_dir.iterdir())
            if test_only:
                episode_dirs = episode_dirs[:1]

            for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc=subset_dir.name):
                json_path = episode_dir / 'metadata.json'
                if not json_path.exists():
                    continue

                flow_dir = episode_dir / 'marker_flow'
                flow_dir.mkdir(parents=True, exist_ok=True)

                with open(json_path, 'r') as f:
                    records = json.load(f)

                for record in (records[:1] if test_only else records):
                    if 'xela' not in record or record['xela'] is None:
                        continue
                    frame_idx = record['frame_idx']
                    output_path = flow_dir / f"flow_{frame_idx:04d}.png"
                    save_xela_marker_flow(record['xela'], output_path)
                    if test_only:
                        print(f" 生成: {output_path}")
                        return

    def process_xela_9dtact():
        # xela_9dtact: flow images nested under 'xela/marker_flow'.
        data_dir = BASE_DIR / 'xela_9dtact'
        if not data_dir.exists():
            return
        print(f"\n生成 xela_9dtact marker flow...")
        episode_dirs = list(data_dir.iterdir())
        if test_only:
            episode_dirs = episode_dirs[:1]

        for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_9dtact"):
            json_path = episode_dir / 'metadata.json'
            if not json_path.exists():
                continue

            flow_dir = episode_dir / 'xela' / 'marker_flow'
            flow_dir.mkdir(parents=True, exist_ok=True)

            with open(json_path, 'r') as f:
                records = json.load(f)

            for record in (records[:1] if test_only else records):
                if 'xela' not in record or record['xela'] is None:
                    continue
                frame_idx = record['frame_idx']
                output_path = flow_dir / f"flow_{frame_idx:04d}.png"
                save_xela_marker_flow(record['xela'], output_path)
                if test_only:
                    print(f" 生成: {output_path}")
                    return

    # Dispatch: a family runs when data_type names it, its group, or 'all'.
    t = data_type
    if t in ['xela_pose', 'pose', 'all']:
        process_xela_pose()
    if t in ['xela_force', 'force', 'all']:
        process_xela_force()
    if t in ['xela_9dtact', 'all']:
        process_xela_9dtact()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_video_from_images(episode_dir, output_path, image_patterns=None,
                             subdir=None, fps_fallback=10, multi_sample=False,
                             sample_pattern=None):
    """Build an mp4 from an episode's image sequence via ffmpeg concat.

    Frame durations come from record timestamps when available (clamped),
    otherwise from fps_fallback. In multi_sample mode each frame's
    duration is split evenly across its sample images.

    Returns True on success, False when metadata/images/ffmpeg are
    missing or ffmpeg fails. The temporary concat list is always removed.
    """
    json_path = episode_dir / 'metadata.json'
    if not json_path.exists():
        return False

    with open(json_path, 'r') as f:
        records = json.load(f)

    if not records:
        return False

    img_dir = episode_dir / subdir if subdir else episode_dir

    if multi_sample and sample_pattern:
        # --- Multi-sample mode: expand every frame into its sample images ---
        all_frames = []
        timestamps = []
        for record in records:
            frame_idx = record.get('frame_idx', len(timestamps))
            # First available timestamp field wins; may be None.
            timestamp = (record.get('sensor_timestamps') or
                         record.get('force_timestamps') or
                         record.get('timestamp'))
            timestamps.append({'frame_idx': frame_idx, 'timestamp': timestamp})

        timestamps.sort(key=lambda x: x['frame_idx'])

        for i, ts_info in enumerate(timestamps):
            frame_idx = ts_info['frame_idx']
            sample_files = []
            # Collect consecutive sample images until one is missing.
            for sample_idx in range(100):
                try:
                    filename = sample_pattern.format(idx=frame_idx, sample=sample_idx)
                    candidate = img_dir / filename
                    if candidate.exists():
                        sample_files.append(candidate)
                    else:
                        break
                except (KeyError, ValueError):
                    # Pattern does not use these placeholders.
                    break

            if not sample_files:
                continue

            # Frame duration from timestamp delta, clamped to [0.01, 2.0] s.
            if i < len(timestamps) - 1 and ts_info['timestamp'] and timestamps[i+1]['timestamp']:
                frame_duration = max(0.01, min(2.0, timestamps[i+1]['timestamp'] - ts_info['timestamp']))
            else:
                frame_duration = 1.0 / fps_fallback

            sample_duration = frame_duration / len(sample_files)
            for sample_file in sample_files:
                all_frames.append({'path': sample_file, 'duration': sample_duration})

        if len(all_frames) < 2:
            return False

        # ffmpeg concat list: paths relative to the list file's directory.
        concat_file = str(episode_dir / '_concat.txt')
        with open(concat_file, 'w') as f:
            for frame in all_frames:
                rel_path = frame['path'].relative_to(episode_dir)
                f.write(f"file '{rel_path}'\nduration {frame['duration']:.6f}\n")
            # Repeat the last frame so its duration is honored.
            rel_path = all_frames[-1]['path'].relative_to(episode_dir)
            f.write(f"file '{rel_path}'\n")
    else:
        # --- Single-image-per-frame mode ---
        if image_patterns is None:
            image_patterns = ["gelsight_{idx:04d}.png", "xela_{idx:04d}.png", "tac02_{idx:04d}.png"]

        frames = []
        for record in records:
            frame_idx = record.get('frame_idx', len(frames))
            image_file = None

            # 1) Try image paths recorded in the metadata itself.
            for field in ['file_name', 'gsmini_image', 'dtact_image']:
                if field in record and record[field]:
                    img_path = record[field].split('/')[-1]
                    candidate = img_dir / img_path
                    if candidate.exists():
                        image_file = candidate
                        break

            # 2) Fall back to the supplied filename patterns.
            if not image_file:
                for pattern in image_patterns:
                    try:
                        candidate = img_dir / pattern.format(idx=frame_idx)
                    # BUG FIX: was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only format() errors
                    # from an incompatible pattern should be skipped.
                    except (KeyError, IndexError, ValueError):
                        continue
                    if candidate.exists():
                        image_file = candidate
                        break

            # 3) Last resort inside a subdir: conventional frame names.
            if not image_file and subdir:
                for pattern in [f"frame_{frame_idx:04d}.png", f"heatmap_{frame_idx:04d}.png"]:
                    candidate = img_dir / pattern
                    if candidate.exists():
                        image_file = candidate
                        break

            if image_file:
                timestamp = (record.get('sensor_timestamps') or
                             record.get('force_timestamps') or
                             record.get('timestamp'))
                frames.append({'path': image_file, 'timestamp': timestamp, 'frame_idx': frame_idx})

        if len(frames) < 2:
            return False

        frames.sort(key=lambda x: x['frame_idx'])

        concat_file = str(episode_dir / '_concat.txt')
        with open(concat_file, 'w') as f:
            for i, frame in enumerate(frames):
                # Duration from timestamp delta, clamped to [0.01, 1.0] s.
                if i < len(frames) - 1 and frame['timestamp'] and frames[i+1]['timestamp']:
                    duration = max(0.01, min(1.0, frames[i+1]['timestamp'] - frame['timestamp']))
                else:
                    duration = 1.0 / fps_fallback

                rel_path = frame['path'].relative_to(episode_dir)
                f.write(f"file '{rel_path}'\nduration {duration:.6f}\n")
            rel_path = frames[-1]['path'].relative_to(episode_dir)
            f.write(f"file '{rel_path}'\n")

    # Even dimensions are required by libx264 with yuv420p.
    cmd = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', concat_file,
           '-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2',
           '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-crf', '23', output_path]

    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
        return result.returncode == 0
    except FileNotFoundError:
        print(" 错误: ffmpeg 未安装")
        return False
    finally:
        # Always clean up the temporary concat list.
        Path(concat_file).unlink(missing_ok=True)
|
|
|
|
|
|
|
|
def generate_videos(data_type='all', test_only=False):
    """生成视频 (generate per-episode videos from extracted image frames).

    Args:
        data_type: 数据类型 — 'all' or one of the job keys below
            (e.g. 'xela_force', 'gelsight_pose', 'tacniq_gsmini', ...).
        test_only: if True, process only the first episode of each job and
            print the generated video path (quick smoke test).
    """

    def process(data_path, name, **kwargs):
        # Run create_video_from_images over every episode dir under data_path.
        data_dir = BASE_DIR / data_path
        if not data_dir.exists():
            print(f"{data_path} 不存在")
            return

        print(f"\n处理 {name}...")
        # Episode dirs are named "<prefix>_<index>"; sort numerically by index
        # so frame/video ordering is stable (lexicographic sort would misorder).
        episode_dirs = sorted([d for d in data_dir.iterdir() if d.is_dir()],
                              key=lambda x: int(x.name.split('_')[-1]))
        if test_only:
            episode_dirs = episode_dirs[:1]

        video_name = kwargs.pop('video_name', 'video.mp4')
        success = 0
        for episode_dir in tqdm(episode_dirs, desc=name):
            if create_video_from_images(episode_dir, str(episode_dir / video_name), **kwargs):
                success += 1
                if test_only:
                    print(f" 生成: {episode_dir / video_name}")
        print(f" 成功: {success}/{len(episode_dirs)}")

    # Declarative job table: (trigger key, data path, display name, process kwargs).
    # Replaces a long hand-written if-chain of near-identical calls; duplicate
    # keys (tacniq_gsmini, xela_9dtact) run several jobs for the same key.
    jobs = [
        # 力数据 (force data)
        ('9dtact_force', 'force_data/9dtact_force_h5', '9dtact_force',
         dict(image_patterns=["gelsight_{idx:04d}.png"])),
        ('xela_force', 'force_data/xela_force_h5', 'xela_force',
         dict(image_patterns=["xela_{idx:04d}.png"])),
        ('gelsight_force', 'force_data/gelsight_force_h5', 'gelsight_force',
         dict(image_patterns=["gelsight_{idx:04d}.png"])),
        ('tac02_force', 'force_data/tac02_force_h5', 'tac02_force',
         dict(image_patterns=["tac02_{idx:04d}.png"])),
        # 姿态数据 (pose data, multi-sample frames)
        ('gelsight_pose', 'pose_data/gelsight_pose_h5', 'gelsight_pose',
         dict(multi_sample=True, sample_pattern="images_f{idx:04d}_s{sample}.png")),
        ('9dtact_pose', 'pose_data/9dtact_pose_h5', '9dtact_pose',
         dict(multi_sample=True, sample_pattern="images_f{idx:04d}_s{sample}.png")),
        ('tac02_pose', 'pose_data/tac02_pose_h5', 'tac02_pose',
         dict(multi_sample=True, sample_pattern="tactile_f{idx:04d}_s{sample:02d}.png")),
        ('xela_pose', 'pose_data/xela_pose_h5', 'xela_pose',
         dict(multi_sample=True, sample_pattern="xela_f{idx:04d}_s{sample:02d}.png")),
        # marker flow 可视化 (xela marker flow renders)
        ('xela_pose_flow', 'pose_data/xela_pose_h5', 'xela_pose (marker_flow)',
         dict(subdir='marker_flow', multi_sample=True,
              sample_pattern="flow_f{idx:04d}_s{sample:02d}.png", video_name="video_flow.mp4")),
        ('xela_force_flow', 'force_data/xela_force_h5', 'xela_force (marker_flow)',
         dict(subdir='marker_flow', image_patterns=["flow_{idx:04d}.png"],
              video_name="video_flow.mp4")),
        ('xela_9dtact_flow', 'xela_9dtact', 'xela_9dtact (marker_flow)',
         dict(subdir='xela/marker_flow', image_patterns=["flow_{idx:04d}.png"],
              video_name="video_flow.mp4")),
        # 双传感器数据集 (paired-sensor datasets: two videos per key)
        ('tacniq_gsmini', 'tacniq_gsmini', 'tacniq (gsmini)',
         dict(subdir='gsmini', image_patterns=["frame_{idx:04d}.png"], video_name="video_gsmini.mp4")),
        ('tacniq_gsmini', 'tacniq_gsmini', 'tacniq (tacniq)',
         dict(subdir='tacniq', image_patterns=["heatmap_{idx:04d}.png"], video_name="video_tacniq.mp4")),
        ('xela_9dtact', 'xela_9dtact', 'xela_9dtact (9dtact)',
         dict(subdir='9dtact', image_patterns=["frame_{idx:04d}.png"], video_name="video_9dtact.mp4")),
        ('xela_9dtact', 'xela_9dtact', 'xela_9dtact (xela)',
         dict(subdir='xela', image_patterns=["heatmap_{idx:04d}.png"], video_name="video_xela.mp4")),
    ]

    for key, data_path, name, kwargs in jobs:
        if data_type in (key, 'all'):
            process(data_path, name, **kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def pack_images(delete_originals=False):
    """
    把每个 episode 的图像序列打包成 tar 文件(WebDataset 格式)
    减少文件数量,便于上传 Hugging Face

    Args:
        delete_originals: if True, delete the PNG files (and any sub-directories
            emptied by that deletion) after they are written into images.tar.
    """
    import tarfile

    data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']
    # Sensor sub-directories that may hold images. Shared by the collection
    # pass and the cleanup pass below so the two lists can never drift apart.
    image_subdirs = ['gsmini', '9dtact', 'tacniq', 'xela', 'marker_flow']

    for folder_name in data_folders:
        folder = BASE_DIR / folder_name
        if not folder.exists():
            continue

        # Every directory containing a metadata.json is treated as one episode.
        episode_dirs = [p.parent for p in folder.rglob('metadata.json')]

        print(f"\n打包 {folder_name}: {len(episode_dirs)} 个 episode")

        for episode_dir in tqdm(episode_dirs, desc=folder_name):
            # Collect top-level PNGs plus PNGs inside the known sub-dirs,
            # including one extra level of nesting under each sub-dir.
            image_files = list(episode_dir.glob('*.png'))
            for subdir in image_subdirs:
                subpath = episode_dir / subdir
                if subpath.exists():
                    image_files.extend(subpath.glob('*.png'))
                    for nested in subpath.iterdir():
                        if nested.is_dir():
                            image_files.extend(nested.glob('*.png'))

            if not image_files:
                continue

            # Archive names are relative to the episode dir so that unpacking
            # in place restores the original layout exactly.
            tar_path = episode_dir / 'images.tar'
            with tarfile.open(tar_path, 'w') as tar:
                for img_path in image_files:
                    tar.add(img_path, arcname=str(img_path.relative_to(episode_dir)))

            if delete_originals:
                for img_path in image_files:
                    img_path.unlink()

                # Remove sub-directories the deletion left empty
                # (nested level first, then the sub-dir itself).
                for subdir in image_subdirs:
                    subpath = episode_dir / subdir
                    if subpath.exists():
                        for nested in subpath.iterdir():
                            if nested.is_dir() and not any(nested.iterdir()):
                                nested.rmdir()
                        if not any(subpath.iterdir()):
                            subpath.rmdir()

    print("\n打包完成!")
    if delete_originals:
        print("原始图像文件已删除")
|
|
|
|
|
|
|
|
def unpack_images(delete_tar=False):
    """
    解压 tar 文件中的图像

    Args:
        delete_tar: if True, delete each images.tar after a successful extract.
    """
    import tarfile

    data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']

    # PEP 706 extraction filters (Python 3.12+, backported to security
    # releases): the 'data' filter rejects absolute paths / path traversal
    # inside the tar. Guarded so older interpreters keep working unchanged.
    extract_kwargs = {'filter': 'data'} if hasattr(tarfile, 'data_filter') else {}

    for folder_name in data_folders:
        folder = BASE_DIR / folder_name
        if not folder.exists():
            continue

        tar_files = list(folder.rglob('images.tar'))
        if not tar_files:
            continue

        print(f"\n解压 {folder_name}: {len(tar_files)} 个 tar 文件")

        for tar_path in tqdm(tar_files, desc=folder_name):
            episode_dir = tar_path.parent
            try:
                with tarfile.open(tar_path, 'r') as tar:
                    tar.extractall(path=episode_dir, **extract_kwargs)

                # Only delete the tar once extraction succeeded.
                if delete_tar:
                    tar_path.unlink()
            except Exception as e:
                # Best-effort: report and continue with the remaining tars.
                print(f"\n 解压失败 {tar_path}: {e}")

    print("\n解压完成!")
    if delete_tar:
        print("tar 文件已删除")
|
|
|
|
|
|
|
|
def clean_images():
    """删除所有 PNG 图像,只保留视频和 metadata

    Deletes every *.png under the known data folders, then removes any
    directories left empty by the deletion.
    """
    data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']

    total_deleted = 0
    for folder_name in data_folders:
        folder = BASE_DIR / folder_name
        if not folder.exists():
            continue

        png_files = list(folder.rglob('*.png'))
        print(f"{folder_name}: {len(png_files)} 个 PNG 文件")

        for png_path in tqdm(png_files, desc=f"删除 {folder_name}"):
            png_path.unlink()
            total_deleted += 1

    # Remove directories left empty by the deletion. Materialize the listing
    # (don't rmdir while iterating a live rglob) and process deepest paths
    # first: top-down iteration would inspect a parent before its empty child
    # is removed and wrongly skip it, leaving chains of empty directories.
    for folder_name in data_folders:
        folder = BASE_DIR / folder_name
        if not folder.exists():
            continue
        all_dirs = sorted((d for d in folder.rglob('*') if d.is_dir()),
                          key=lambda d: len(d.parts), reverse=True)
        for subdir in all_dirs:
            if not any(subdir.iterdir()):
                subdir.rmdir()

    print(f"\n删除完成!共删除 {total_deleted} 个文件")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def upload_to_hf(sync=False):
    """上传到 Hugging Face

    Args:
        sync: 如果为 True,删除远端存在但本地不存在的文件
    """
    from huggingface_hub import HfApi

    api = HfApi()

    # Single source of truth for the upload configuration — previously this
    # was duplicated in both code paths and could silently drift.
    upload_kwargs = {
        "repo_id": "BorisGuo/pair_touch_13m",
        "repo_type": "dataset",
        "folder_path": str(BASE_DIR),
        "ignore_patterns": ["__pycache__/**", "*.h5"],
    }

    # upload_large_folder (resumable, parallel) only exists in newer
    # huggingface_hub versions, and not every version of it accepts
    # delete_patterns — probe the installed API before deciding the path.
    has_large_upload = hasattr(api, "upload_large_folder")
    if has_large_upload:
        large_params = set(inspect.signature(api.upload_large_folder).parameters)
    else:
        large_params = set()

    supports_delete = "delete_patterns" in large_params

    if sync and not supports_delete:
        # Sync requested but upload_large_folder can't delete remote files:
        # fall back to upload_folder, which always supports delete_patterns.
        api.upload_folder(delete_patterns=["*"], **upload_kwargs)
    else:
        if sync and supports_delete:
            upload_kwargs["delete_patterns"] = ["*"]
        if has_large_upload:
            api.upload_large_folder(**upload_kwargs)
        else:
            api.upload_folder(**upload_kwargs)
    print("上传完成!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: parse the subcommand and dispatch to the pipeline step.

    Subcommands mirror the module docstring: extract / heatmap / marker_flow /
    video / pack / unpack / clean / upload / all. With no subcommand, prints
    the help text.
    """
    parser = argparse.ArgumentParser(description="数据集预处理")
    subparsers = parser.add_subparsers(dest='command', help='命令')

    # extract: parse H5 files; --check only inspects structure,
    # --update only refreshes metadata.json files.
    extract_parser = subparsers.add_parser('extract', help='解析 H5 文件')
    extract_parser.add_argument('--check', action='store_true', help='仅检查结构')
    extract_parser.add_argument('--update', action='store_true', help='仅更新 metadata')

    # heatmap: render tactile heatmap PNGs; --test processes one episode only.
    heatmap_parser = subparsers.add_parser('heatmap', help='生成热力图')
    heatmap_parser.add_argument('--test', action='store_true', help='测试模式')
    heatmap_parser.add_argument('--type', default='all', help='数据类型')

    # marker_flow: render xela marker-flow visualizations.
    flow_parser = subparsers.add_parser('marker_flow', help='生成 xela marker flow 可视化')
    flow_parser.add_argument('--test', action='store_true', help='测试模式')
    flow_parser.add_argument('--type', default='all',
                             choices=['xela_pose', 'xela_force', 'xela_9dtact', 'pose', 'force', 'all'],
                             help='数据类型')

    # video: assemble per-episode MP4s from the rendered frames.
    video_parser = subparsers.add_parser('video', help='生成视频')
    video_parser.add_argument('--test', action='store_true', help='测试模式')
    video_parser.add_argument('--type', default='all', help='数据类型')

    # pack / unpack: tar the image sequences (WebDataset-style) and back.
    pack_parser = subparsers.add_parser('pack', help='打包图像序列为 tar 文件')
    pack_parser.add_argument('--delete', action='store_true', help='打包后删除原始图像')

    unpack_parser = subparsers.add_parser('unpack', help='解压 tar 文件中的图像')
    unpack_parser.add_argument('--delete', action='store_true', help='解压后删除 tar 文件')

    # clean: irreversibly removes all PNGs, keeping videos and metadata.
    subparsers.add_parser('clean', help='删除所有 PNG 图像,只保留视频')

    # upload: push the dataset folder to the Hugging Face Hub.
    upload_parser = subparsers.add_parser('upload', help='上传到 Hugging Face')
    upload_parser.add_argument('--sync', action='store_true',
                               help='同步模式:删除远端存在但本地不存在的文件')

    subparsers.add_parser('all', help='完整流程')

    args = parser.parse_args()

    # Dispatch. Each branch delegates to one pipeline function defined above.
    if args.command == 'extract':
        if args.check:
            check_h5_structure()
        elif args.update:
            update_metadata()
        else:
            extract_all()
    elif args.command == 'heatmap':
        print("生成热力图...")
        generate_heatmaps(args.type, args.test)
        print("\n完成!")
    elif args.command == 'marker_flow':
        print("生成 marker flow...")
        generate_marker_flow(args.type, args.test)
        print("\n完成!")
    elif args.command == 'video':
        print("生成视频...")
        generate_videos(args.type, args.test)
        print("\n完成!")
    elif args.command == 'pack':
        print("打包图像序列...")
        pack_images(delete_originals=args.delete)
    elif args.command == 'unpack':
        print("解压图像...")
        unpack_images(delete_tar=args.delete)
    elif args.command == 'clean':
        print("清理图像文件...")
        clean_images()
    elif args.command == 'upload':
        upload_to_hf(sync=args.sync)
    elif args.command == 'all':
        # Full pipeline: extract -> heatmap -> video -> metadata update.
        print("="*60 + "\n完整流程\n" + "="*60)
        print("\n[1/4] 解析 H5 文件...")
        extract_all()
        print("\n[2/4] 生成热力图...")
        generate_heatmaps('all', False)
        print("\n[3/4] 生成视频...")
        generate_videos('all', False)
        print("\n[4/4] 更新 metadata...")
        update_metadata()
        print("\n" + "="*60 + "\n完成!\n" + "="*60)
    else:
        # No (or unknown) subcommand: show usage instead of failing silently.
        parser.print_help()
|
|
|
|
|
|
|
|
# Script entry point: only dispatch the CLI when executed directly.
if __name__ == "__main__":
    main()
|
|
|