BorisGuo committed on
Commit 56dae00 · verified
1 Parent(s): bc43eae

Upload preprocess.py with huggingface_hub

Files changed (1)
  1. preprocess.py +1333 -0
preprocess.py ADDED
@@ -0,0 +1,1333 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Unified entry point for dataset preprocessing
4
+
5
+ Usage:
6
+ python preprocess.py extract # parse H5 files
7
+ python preprocess.py extract --check # only inspect the H5 structure
8
+ python preprocess.py extract --update # update metadata (add heatmap/video paths)
9
+ python preprocess.py heatmap # generate heatmaps
10
+ python preprocess.py heatmap --test # test heatmap generation
11
+ python preprocess.py marker_flow # generate xela marker flow visualizations
12
+ python preprocess.py marker_flow --test # test marker flow generation
13
+ python preprocess.py video # generate videos
14
+ python preprocess.py video --test # test video generation
15
+ python preprocess.py pack # pack images into tar files
16
+ python preprocess.py pack --delete # delete original images after packing
17
+ python preprocess.py unpack # unpack tar files
18
+ python preprocess.py unpack --delete # delete tar files after unpacking
19
+ python preprocess.py clean # delete all PNGs, keep only videos
20
+ python preprocess.py upload # upload to Hugging Face
21
+ python preprocess.py upload --sync # sync upload (delete extra remote files)
22
+ python preprocess.py all # full pipeline (extract -> heatmap -> video -> update)
23
+ """
24
+
25
+ import argparse
26
+ import json
27
+ import subprocess
28
+ import tempfile
29
+ from pathlib import Path
30
+ from collections import defaultdict
31
+
32
+ import h5py
33
+ import numpy as np
34
+ from PIL import Image
35
+ from tqdm import tqdm
36
+ import matplotlib
37
+ matplotlib.use('Agg')
38
+ import matplotlib.pyplot as plt
39
+
40
+
41
+ # ============================================================
42
+ # Configuration
43
+ # ============================================================
44
+
45
+ BASE_DIR = Path(__file__).parent
46
+
47
+ # Heatmap configuration
48
+ TACTILE_VMIN = 15
49
+ TACTILE_VMAX = 750
50
+ TACTILE_CMAP = 'plasma'
51
+ XELA_VMIN = -5
52
+ XELA_VMAX = 5
53
+ XELA_CMAP = 'RdBu_r'
54
+
55
+
56
+ # ============================================================
57
+ # Heatmap generation functions
58
+ # ============================================================
59
+
60
+ def save_tactile_heatmap(data, output_path, rows=11, cols=6):
61
+ """Save a tactile heatmap."""
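+ # A flat array of length rows*cols is reshaped to the sensor grid; any other length is plotted as a single row.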
62
+ data = np.array(data)
63
+ if len(data.shape) == 1:
64
+ if len(data) == rows * cols:
65
+ data = data.reshape(rows, cols)
66
+ else:
67
+ data = data.reshape(1, -1)
68
+
69
+ fig, ax = plt.subplots(figsize=(cols * 0.5, rows * 0.5))
70
+ ax.imshow(data, cmap=TACTILE_CMAP, aspect='equal', interpolation='nearest',
71
+ vmin=TACTILE_VMIN, vmax=TACTILE_VMAX)
72
+ ax.axis('off')
73
+ plt.savefig(output_path, dpi=80, bbox_inches='tight', pad_inches=0)
74
+ plt.close(fig)
75
+
76
+
77
+ def save_xela_heatmap(data, output_path):
78
+ """Save a xela heatmap (Z-axis heatmap + XY arrows)."""
79
+ data = np.array(data)
80
+
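+ # 72 values correspond to a 4x6 taxel grid with (fx, fy, fz) per taxel; other lengths fall back to a 1-D strip plot.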
81
+ if len(data) == 72:
82
+ data = data.reshape(4, 6, 3)
83
+ fx, fy, fz = data[:, :, 0], data[:, :, 1], data[:, :, 2]
84
+
85
+ fig, ax = plt.subplots(figsize=(4, 3))
86
+ ax.imshow(fz, cmap=XELA_CMAP, aspect='equal', interpolation='nearest',
87
+ vmin=XELA_VMIN, vmax=XELA_VMAX)
88
+
89
+ rows, cols = 4, 6
90
+ y_grid, x_grid = np.mgrid[0:rows, 0:cols]
91
+ magnitude = np.sqrt(fx**2 + fy**2)
92
+ max_mag = magnitude.max() if magnitude.max() > 0 else 1
93
+ scale = 0.4 / max_mag
94
+
95
+ ax.quiver(x_grid, y_grid, fx * scale, -fy * scale,
96
+ color='black', scale=1, scale_units='xy',
97
+ width=0.02, headwidth=3, headlength=2)
98
+ ax.axis('off')
99
+ plt.savefig(output_path, dpi=100, bbox_inches='tight', pad_inches=0)
100
+ plt.close(fig)
101
+ else:
102
+ fig, ax = plt.subplots(figsize=(6, 1))
103
+ ax.imshow(data.reshape(1, -1), cmap=XELA_CMAP, aspect='auto',
104
+ vmin=XELA_VMIN, vmax=XELA_VMAX)
105
+ ax.axis('off')
106
+ plt.savefig(output_path, dpi=80, bbox_inches='tight', pad_inches=0)
107
+ plt.close(fig)
108
+
109
+
110
+ def save_xela_marker_flow(data, output_path):
111
+ """
112
+ Save a xela marker flow visualization
113
+ - the grid dots are displaced according to the XY force (same direction as the arrows)
114
+ - the Z-axis force is encoded by dot size and color
115
+ """
116
+ data = np.array(data)
117
+
118
+ if len(data) != 72:
119
+ return
120
+
121
+ data = data.reshape(4, 6, 3)
122
+ fx, fy, fz = data[:, :, 0], data[:, :, 1], data[:, :, 2]
123
+
124
+ # Use the same scale computation as the arrows
125
+ magnitude = np.sqrt(fx**2 + fy**2)
126
+ max_mag = magnitude.max() if magnitude.max() > 0 else 1
127
+ scale = 0.4 / max_mag # maximum displacement of 0.4 grid cells
128
+
129
+ rows, cols = 4, 6
130
+ fig, ax = plt.subplots(figsize=(6, 4))
131
+
132
+ # Use imshow to establish exactly the same coordinate system as the heatmap
133
+ bg = np.ones((rows, cols)) * 0.95 # light gray background
134
+ ax.imshow(bg, cmap='gray', vmin=0, vmax=1, aspect='equal')
135
+
136
+ # Draw the original grid positions (small light gray dots)
137
+ for i in range(rows):
138
+ for j in range(cols):
139
+ ax.plot(j, i, 'o', color='#cccccc', markersize=8)
140
+
141
+ # Draw the displaced markers (same direction handling as the quiver arrows)
142
+ for i in range(rows):
143
+ for j in range(cols):
144
+ # The displacement matches the quiver arrow direction exactly
145
+ dx = fx[i, j] * scale
146
+ dy = -fy[i, j] * scale # consistent with the -fy used in quiver
147
+
148
+ # New position
149
+ new_x = j + dx
150
+ new_y = i + dy
151
+
152
+ # Connecting line (from the original point to the new position)
153
+ ax.plot([j, new_x], [i, new_y], '-', color='#888888', linewidth=1, alpha=0.5)
154
+
155
+ # Dot size follows the Z-axis (normal) force, using a fixed range
156
+ z_normalized = abs(fz[i, j]) / XELA_VMAX # normalize to [0, 1]
157
+ size = 8 + z_normalized * 15 # base size 8, maximum 23
158
+ size = min(max(size, 6), 25) # clamp the range
159
+
160
+ # Color based on the sign of the Z-axis force
161
+ if fz[i, j] > 0:
162
+ color = '#e74c3c' # red (positive / pressing force)
163
+ else:
164
+ color = '#3498db' # blue (negative / pulling force)
165
+
166
+ ax.plot(new_x, new_y, 'o', color=color, markersize=size,
167
+ markeredgecolor='white', markeredgewidth=0.5)
168
+
169
+ ax.axis('off')
170
+ plt.savefig(output_path, dpi=100, bbox_inches='tight', pad_inches=0.1)
171
+ plt.close(fig)
172
+
173
+
174
+ # ============================================================
175
+ # H5 parsing functions
176
+ # ============================================================
177
+
178
+ def check_h5_structure():
179
+ """Inspect the H5 file structure."""
180
+ folder_keys = defaultdict(lambda: defaultdict(set))
181
+ h5_folders = [d for d in BASE_DIR.iterdir() if d.is_dir() and d.name.endswith('_h5')]
182
+
183
+ for h5_folder in sorted(h5_folders):
184
+ h5_files = list(h5_folder.rglob('*.h5'))
185
+ print(f"\n{'='*60}\nFolder: {h5_folder.name} ({len(h5_files)} files)\n{'='*60}")
186
+
187
+ for h5_path in h5_files[:3]:
188
+ print(f"\n {h5_path.name}:")
189
+ with h5py.File(h5_path, 'r') as f:
190
+ for key in sorted(f.keys()):
191
+ arr = f[key]
192
+ print(f" - {key}: shape={arr.shape}, dtype={arr.dtype}")
193
+ folder_keys[h5_folder.name][key].add(str(arr.shape))
194
+
195
+ for h5_path in h5_files:
196
+ with h5py.File(h5_path, 'r') as f:
197
+ for key in f.keys():
198
+ folder_keys[h5_folder.name][key].add(str(f[key].shape))
199
+
200
+ print(f"\n Summary:")
201
+ for key, shapes in sorted(folder_keys[h5_folder.name].items()):
202
+ print(f" - {key}: {list(shapes)}")
203
+
204
+
205
+ def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):
206
+ """Parse a pose_data H5 file."""
207
+ episode_dir = output_dir / episode_id
208
+ episode_dir.mkdir(parents=True, exist_ok=True)
209
+ rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id
210
+ records = []
211
+
212
+ with h5py.File(h5_path, 'r') as f:
213
+ keys = list(f.keys())
214
+ num_frames = len(f['timestamps'][:]) if 'timestamps' in keys else 0
215
+ data_cache = {}
216
+ image_paths = {}
217
+
218
+ for key in keys:
219
+ arr = f[key][:]
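+ # uint8 datasets are images: 3-D = one background image, 4-D = per-frame images, 5-D = per-frame multi-sample images; other dtypes are kept as plain lists.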
220
+ if arr.dtype == np.uint8:
221
+ if len(arr.shape) == 3:
222
+ filename = "bg.png"
223
+ Image.fromarray(arr).save(episode_dir / filename)
224
+ data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"
225
+ elif len(arr.shape) == 4:
226
+ paths = []
227
+ for i, img in enumerate(arr):
228
+ filename = f"{key}_{i:04d}.png"
229
+ Image.fromarray(img).save(episode_dir / filename)
230
+ paths.append(f"{rel_prefix}/{filename}")
231
+ image_paths[key] = paths
232
+ elif len(arr.shape) == 5:
233
+ num_samples = arr.shape[1]
234
+ paths = []
235
+ for frame_idx in range(arr.shape[0]):
236
+ frame_paths = []
237
+ for sample_idx in range(num_samples):
238
+ filename = f"{key}_f{frame_idx:04d}_s{sample_idx}.png"
239
+ Image.fromarray(arr[frame_idx, sample_idx]).save(episode_dir / filename)
240
+ frame_paths.append(f"{rel_prefix}/{filename}")
241
+ paths.append(frame_paths)
242
+ image_paths[key] = paths
243
+ data_cache[f"{key}_num_samples"] = num_samples
244
+ else:
245
+ data_cache[key] = arr.tolist()
246
+
247
+ for frame_idx in range(num_frames):
248
+ record = {"episode_id": episode_id, "frame_idx": frame_idx}
249
+ if subset_path:
250
+ record["subset"] = subset_path
251
+
252
+ for key, paths in image_paths.items():
253
+ if isinstance(paths[0], list):
254
+ for s_idx, p in enumerate(paths[frame_idx]):
255
+ if s_idx == 0:
256
+ record["file_name"] = p
257
+ record[f"image_s{s_idx}"] = p
258
+ else:
259
+ record["file_name"] = paths[frame_idx]
260
+
261
+ for key, val in data_cache.items():
262
+ if key.endswith("_image") or key.endswith("_num_samples"):
263
+ record[key] = val
264
+
265
+ if 'timestamps' in data_cache:
266
+ record["timestamp"] = data_cache['timestamps'][frame_idx]
267
+ if 'rotations' in data_cache:
268
+ record["rotation"] = data_cache['rotations'][frame_idx]
269
+ if 'translations' in data_cache:
270
+ record["translation"] = data_cache['translations'][frame_idx]
271
+ if 'tactile' in data_cache:
272
+ record["tactile"] = data_cache['tactile'][frame_idx]
273
+ if 'xela' in data_cache:
274
+ record["xela"] = data_cache['xela'][frame_idx]
275
+
276
+ record["num_frames"] = num_frames
277
+ records.append(record)
278
+
279
+ return records
280
+
281
+
282
+ def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
283
+ """Parse a force_data H5 file."""
284
+ episode_dir = output_dir / episode_id
285
+ episode_dir.mkdir(parents=True, exist_ok=True)
286
+ rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id
287
+ records = []
288
+
289
+ with h5py.File(h5_path, 'r') as f:
290
+ keys = list(f.keys())
291
+ num_frames = 0
292
+ data_cache = {}
293
+ image_paths = {}
294
+
295
+ for key in keys:
296
+ arr = f[key][:]
297
+ if arr.dtype == np.uint8:
298
+ if len(arr.shape) == 3:
299
+ filename = f"{key}.png"
300
+ Image.fromarray(arr).save(episode_dir / filename)
301
+ data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"
302
+ elif len(arr.shape) == 4:
303
+ num_frames = max(num_frames, len(arr))
304
+ paths = []
305
+ for i, img in enumerate(arr):
306
+ filename = f"{key}_{i:04d}.png"
307
+ Image.fromarray(img).save(episode_dir / filename)
308
+ paths.append(f"{rel_prefix}/{filename}")
309
+ image_paths[key] = paths
310
+ else:
311
+ data_cache[key] = arr.tolist()
312
+ if len(arr.shape) >= 1:
313
+ num_frames = max(num_frames, len(arr))
314
+
315
+ for frame_idx in range(num_frames):
316
+ record = {"episode_id": episode_id, "frame_idx": frame_idx, "num_frames": num_frames}
317
+ if subset_path:
318
+ record["subset"] = subset_path
319
+
320
+ for key, paths in image_paths.items():
321
+ if frame_idx < len(paths):
322
+ record["file_name"] = paths[frame_idx]
323
+
324
+ for key, val in data_cache.items():
325
+ if key.endswith("_image"):
326
+ record[key] = val
327
+ elif isinstance(val, list) and frame_idx < len(val):
328
+ record[key] = val[frame_idx]
329
+
330
+ records.append(record)
331
+
332
+ return records
333
+
334
+
335
+ def extract_tacniq_gsmini(h5_path, output_dir, episode_id, subset_path=""):
336
+ """Parse a tacniq_gsmini H5 file."""
337
+ episode_dir = output_dir / episode_id
338
+ episode_dir.mkdir(parents=True, exist_ok=True)
339
+ gsmini_dir = episode_dir / "gsmini"
340
+ gsmini_dir.mkdir(parents=True, exist_ok=True)
341
+ rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id
342
+ records = []
343
+
344
+ with h5py.File(h5_path, 'r') as f:
345
+ bg = f['bg'][:]
346
+ gsmini = f['gsmini'][:]
347
+ tacniq = f['tacniq'][:].tolist()
348
+
349
+ Image.fromarray(bg).save(episode_dir / "bg.png")
350
+ num_frames = len(gsmini)
351
+
352
+ for frame_idx in range(num_frames):
353
+ gsmini_filename = f"frame_{frame_idx:04d}.png"
354
+ Image.fromarray(gsmini[frame_idx]).save(gsmini_dir / gsmini_filename)
355
+
356
+ records.append({
357
+ "episode_id": episode_id,
358
+ "frame_idx": frame_idx,
359
+ "file_name": f"{rel_prefix}/gsmini/{gsmini_filename}",
360
+ "gsmini_image": f"{rel_prefix}/gsmini/{gsmini_filename}",
361
+ "bg_image": f"{rel_prefix}/bg.png",
362
+ "tacniq": tacniq[frame_idx] if frame_idx < len(tacniq) else None,
363
+ "num_frames": num_frames,
364
+ "subset": subset_path if subset_path else None,
365
+ })
366
+
367
+ return records
368
+
369
+
370
+ def extract_xela_9dtact(h5_path, output_dir, episode_id, subset_path=""):
371
+ """Parse a xela_9dtact H5 file."""
372
+ episode_dir = output_dir / episode_id
373
+ episode_dir.mkdir(parents=True, exist_ok=True)
374
+ dtact_dir = episode_dir / "9dtact"
375
+ dtact_dir.mkdir(parents=True, exist_ok=True)
376
+ rel_prefix = f"{subset_path}/{episode_id}" if subset_path else episode_id
377
+ records = []
378
+
379
+ with h5py.File(h5_path, 'r') as f:
380
+ bg = f['bg'][:]
381
+ dtact = f['9dtact'][:]
382
+ xela = f['xela'][:].tolist()
383
+
384
+ Image.fromarray(bg).save(episode_dir / "bg.png")
385
+ num_frames = len(dtact)
386
+
387
+ for frame_idx in range(num_frames):
388
+ dtact_filename = f"frame_{frame_idx:04d}.png"
389
+ Image.fromarray(dtact[frame_idx]).save(dtact_dir / dtact_filename)
390
+
391
+ records.append({
392
+ "episode_id": episode_id,
393
+ "frame_idx": frame_idx,
394
+ "file_name": f"{rel_prefix}/9dtact/{dtact_filename}",
395
+ "dtact_image": f"{rel_prefix}/9dtact/{dtact_filename}",
396
+ "bg_image": f"{rel_prefix}/bg.png",
397
+ "xela": xela[frame_idx] if frame_idx < len(xela) else None,
398
+ "num_frames": num_frames,
399
+ "subset": subset_path if subset_path else None,
400
+ })
401
+
402
+ return records
403
+
404
+
405
+ def extract_all():
406
+ """Parse all H5 files."""
407
+ h5_folders = [d for d in BASE_DIR.iterdir() if d.is_dir() and d.name.endswith('_h5')]
408
+
409
+ for h5_folder in h5_folders:
410
+ output_folder = BASE_DIR / h5_folder.name.replace('_h5', '')
411
+ output_folder.mkdir(exist_ok=True)
412
+
413
+ h5_files = list(h5_folder.rglob('*.h5'))
414
+ print(f"\nParsing {h5_folder.name}: {len(h5_files)} files")
415
+
416
+ all_records = []
417
+
418
+ for h5_path in tqdm(h5_files, desc=h5_folder.name):
419
+ relative = h5_path.relative_to(h5_folder)
420
+ sub_output_dir = output_folder / relative.parent
421
+ sub_output_dir.mkdir(parents=True, exist_ok=True)
422
+
423
+ episode_id = h5_path.stem
424
+ subset_path = str(relative.parent) if relative.parent != Path('.') else ""
425
+
426
+ try:
427
+ if 'pose_data' in h5_folder.name:
428
+ records = extract_pose_data(h5_path, sub_output_dir, episode_id, subset_path)
429
+ elif 'tacniq_gsmini' in h5_folder.name:
430
+ records = extract_tacniq_gsmini(h5_path, sub_output_dir, episode_id, subset_path)
431
+ elif 'xela_9dtact' in h5_folder.name:
432
+ records = extract_xela_9dtact(h5_path, sub_output_dir, episode_id, subset_path)
433
+ elif 'force_data' in h5_folder.name:
434
+ records = extract_force_data(h5_path, sub_output_dir, episode_id, subset_path)
435
+ else:
436
+ continue
437
+
438
+ all_records.extend(records)
439
+
440
+ episode_dir = sub_output_dir / episode_id
441
+ with open(episode_dir / "metadata.json", 'w') as f:
442
+ json.dump(records, f, indent=2, ensure_ascii=False)
443
+
444
+ except Exception as e:
445
+ print(f"\nError: {h5_path}: {e}")
446
+
447
+ with open(output_folder / "metadata.jsonl", 'w') as f:
448
+ for record in all_records:
449
+ f.write(json.dumps(record, ensure_ascii=False) + '\n')
450
+
451
+ print(f" Generated {len(all_records)} records")
452
+
453
+
454
+ def update_metadata():
455
+ """Update metadata by adding heatmap and video paths."""
456
+ data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']
457
+ updated_count = 0
458
+
459
+ for folder_name in data_folders:
460
+ folder = BASE_DIR / folder_name
461
+ if not folder.exists():
462
+ continue
463
+
464
+ json_files = list(folder.rglob('metadata.json'))
465
+ print(f"\nUpdating {folder_name}: {len(json_files)} files")
466
+
467
+ for json_path in tqdm(json_files, desc=folder_name):
468
+ episode_dir = json_path.parent
469
+ rel_prefix = str(episode_dir.relative_to(BASE_DIR))
470
+
471
+ with open(json_path, 'r') as f:
472
+ records = json.load(f)
473
+
474
+ modified = False
475
+
476
+ for record in records:
477
+ frame_idx = record.get('frame_idx', 0)
478
+
479
+ # Remove the duplicate image field
480
+ if 'image' in record and 'file_name' in record:
481
+ if record['image'] == record['file_name']:
482
+ del record['image']
483
+ modified = True
484
+
485
+ # Add heatmap paths
486
+ for s_idx in range(100):
487
+ for prefix, key_prefix in [('tactile', 'tactile_heatmap'), ('xela', 'xela_heatmap')]:
488
+ heatmap_file = episode_dir / f"{prefix}_f{frame_idx:04d}_s{s_idx:02d}.png"
489
+ if heatmap_file.exists():
490
+ key = f"{key_prefix}_s{s_idx:02d}"
491
+ new_path = f"{rel_prefix}/{prefix}_f{frame_idx:04d}_s{s_idx:02d}.png"
492
+ if record.get(key) != new_path:
493
+ record[key] = new_path
494
+ modified = True
495
+ else:
496
+ break
497
+
498
+ for prefix in ['tac02', 'xela']:
499
+ heatmap_file = episode_dir / f"{prefix}_{frame_idx:04d}.png"
500
+ if heatmap_file.exists():
501
+ key = f"{prefix}_heatmap"
502
+ new_path = f"{rel_prefix}/{prefix}_{frame_idx:04d}.png"
503
+ if record.get(key) != new_path:
504
+ record[key] = new_path
505
+ modified = True
506
+
507
+ for subdir, key in [('tacniq', 'tacniq_heatmap'), ('xela', 'xela_heatmap')]:
508
+ heatmap_file = episode_dir / subdir / f"heatmap_{frame_idx:04d}.png"
509
+ if heatmap_file.exists():
510
+ new_path = f"{rel_prefix}/{subdir}/heatmap_{frame_idx:04d}.png"
511
+ if record.get(key) != new_path:
512
+ record[key] = new_path
513
+ modified = True
514
+
515
+ # Add video paths
516
+ for video_file in episode_dir.glob('video*.mp4'):
517
+ video_key = video_file.stem
518
+ video_path = f"{rel_prefix}/{video_file.name}"
519
+ for record in records:
520
+ if record.get(video_key) != video_path:
521
+ record[video_key] = video_path
522
+ modified = True
523
+
524
+ if modified:
525
+ with open(json_path, 'w') as f:
526
+ json.dump(records, f, indent=2, ensure_ascii=False)
527
+ updated_count += 1
528
+
529
+ print(f"\nUpdated {updated_count} files")
530
+
531
+ # Regenerate the JSONL files
532
+ print("\nRegenerating JSONL...")
533
+ for folder_name in data_folders:
534
+ folder = BASE_DIR / folder_name
535
+ if not folder.exists():
536
+ continue
537
+
538
+ all_records = []
539
+ for json_path in folder.rglob('metadata.json'):
540
+ with open(json_path, 'r') as f:
541
+ all_records.extend(json.load(f))
542
+
543
+ if all_records:
544
+ with open(folder / "metadata.jsonl", 'w') as f:
545
+ for record in all_records:
546
+ f.write(json.dumps(record, ensure_ascii=False) + '\n')
547
+ print(f" {folder_name}: {len(all_records)} records")
548
+
549
+
550
+ # ============================================================
551
+ # Heatmap generation
552
+ # ============================================================
553
+
554
+ def generate_heatmaps(data_type='all', test_only=False):
555
+ """Generate heatmaps."""
556
+
557
+ def process_tac02_pose():
558
+ data_dir = BASE_DIR / 'pose_data' / 'tac02_pose_h5'
559
+ if not data_dir.exists():
560
+ return
561
+ print(f"\nProcessing tac02_pose_h5...")
562
+ episode_dirs = list(data_dir.iterdir())
563
+ if test_only:
564
+ episode_dirs = episode_dirs[:1]
565
+
566
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="tac02_pose"):
567
+ json_path = episode_dir / 'metadata.json'
568
+ if not json_path.exists():
569
+ continue
570
+ with open(json_path, 'r') as f:
571
+ records = json.load(f)
572
+
573
+ for record in (records[:1] if test_only else records):
574
+ if 'tactile' not in record or record['tactile'] is None:
575
+ continue
576
+ frame_idx = record['frame_idx']
577
+ tactile = record['tactile']
578
+
579
+ if isinstance(tactile[0], list):
580
+ for s_idx, sample in enumerate(tactile):
581
+ output_path = episode_dir / f"tactile_f{frame_idx:04d}_s{s_idx:02d}.png"
582
+ save_tactile_heatmap(sample, output_path)
583
+ if test_only:
584
+ print(f" Generated {len(tactile)} heatmaps")
585
+ return
586
+
587
+ def process_xela_pose():
588
+ data_dir = BASE_DIR / 'pose_data' / 'xela_pose_h5'
589
+ if not data_dir.exists():
590
+ return
591
+ print(f"\nProcessing xela_pose_h5...")
592
+ episode_dirs = list(data_dir.iterdir())
593
+ if test_only:
594
+ episode_dirs = episode_dirs[:1]
595
+
596
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_pose"):
597
+ json_path = episode_dir / 'metadata.json'
598
+ if not json_path.exists():
599
+ continue
600
+ with open(json_path, 'r') as f:
601
+ records = json.load(f)
602
+
603
+ for record in (records[:1] if test_only else records):
604
+ if 'xela' not in record or record['xela'] is None:
605
+ continue
606
+ frame_idx = record['frame_idx']
607
+ xela = record['xela']
608
+
609
+ if isinstance(xela[0], list):
610
+ for s_idx, sample in enumerate(xela):
611
+ output_path = episode_dir / f"xela_f{frame_idx:04d}_s{s_idx:02d}.png"
612
+ save_xela_heatmap(sample, output_path)
613
+ if test_only:
614
+ print(f" Generated {len(xela)} heatmaps")
615
+ return
616
+
617
+ def process_force_data(sensor_type=None):
618
+ force_dir = BASE_DIR / 'force_data'
619
+ if not force_dir.exists():
620
+ return
621
+
622
+ for subset_dir in force_dir.iterdir():
623
+ if not subset_dir.is_dir():
624
+ continue
625
+
626
+ if 'tac02' in subset_dir.name:
627
+ if sensor_type and sensor_type != 'tac02':
628
+ continue
629
+ data_key, prefix = 'tac02', 'tac02'
630
+ elif 'xela' in subset_dir.name:
631
+ if sensor_type and sensor_type != 'xela':
632
+ continue
633
+ data_key, prefix = 'xela', 'xela'
634
+ else:
635
+ continue
636
+
637
+ print(f"\nProcessing {subset_dir.name}...")
638
+ episode_dirs = list(subset_dir.iterdir())
639
+ if test_only:
640
+ episode_dirs = episode_dirs[:1]
641
+
642
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc=subset_dir.name):
643
+ json_path = episode_dir / 'metadata.json'
644
+ if not json_path.exists():
645
+ continue
646
+ with open(json_path, 'r') as f:
647
+ records = json.load(f)
648
+
649
+ for record in (records[:1] if test_only else records):
650
+ if data_key not in record or record[data_key] is None:
651
+ continue
652
+ frame_idx = record['frame_idx']
653
+ heatmap_path = episode_dir / f"{prefix}_{frame_idx:04d}.png"
654
+ if prefix == 'tac02':
655
+ save_tactile_heatmap(record[data_key], heatmap_path)
656
+ else:
657
+ save_xela_heatmap(record[data_key], heatmap_path)
658
+ if test_only:
659
+ print(f" Generated: {heatmap_path}")
660
+ return
661
+
662
+ def process_tacniq_gsmini():
663
+ data_dir = BASE_DIR / 'tacniq_gsmini'
664
+ if not data_dir.exists():
665
+ return
666
+ print(f"\nProcessing tacniq_gsmini...")
667
+ episode_dirs = list(data_dir.iterdir())
668
+ if test_only:
669
+ episode_dirs = episode_dirs[:1]
670
+
671
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="tacniq_gsmini"):
672
+ json_path = episode_dir / 'metadata.json'
673
+ if not json_path.exists():
674
+ continue
675
+
676
+ tacniq_dir = episode_dir / 'tacniq'
677
+ tacniq_dir.mkdir(parents=True, exist_ok=True)
678
+
679
+ with open(json_path, 'r') as f:
680
+ records = json.load(f)
681
+
682
+ for record in (records[:1] if test_only else records):
683
+ if 'tacniq' not in record or record['tacniq'] is None:
684
+ continue
685
+ frame_idx = record['frame_idx']
686
+ heatmap_path = tacniq_dir / f"heatmap_{frame_idx:04d}.png"
687
+ save_tactile_heatmap(record['tacniq'], heatmap_path)
688
+ if test_only:
689
+ print(f" Generated: {heatmap_path}")
690
+ return
691
+
692
+ def process_xela_9dtact():
693
+ data_dir = BASE_DIR / 'xela_9dtact'
694
+ if not data_dir.exists():
695
+ return
696
+ print(f"\nProcessing xela_9dtact...")
697
+ episode_dirs = list(data_dir.iterdir())
698
+ if test_only:
699
+ episode_dirs = episode_dirs[:1]
700
+
701
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_9dtact"):
702
+ json_path = episode_dir / 'metadata.json'
703
+ if not json_path.exists():
704
+ continue
705
+
706
+ xela_dir = episode_dir / 'xela'
707
+ xela_dir.mkdir(parents=True, exist_ok=True)
708
+
709
+ with open(json_path, 'r') as f:
710
+ records = json.load(f)
711
+
712
+ for record in (records[:1] if test_only else records):
713
+ if 'xela' not in record or record['xela'] is None:
714
+ continue
715
+ frame_idx = record['frame_idx']
716
+ heatmap_path = xela_dir / f"heatmap_{frame_idx:04d}.png"
717
+ save_xela_heatmap(record['xela'], heatmap_path)
718
+ if test_only:
719
+ print(f" Generated: {heatmap_path}")
720
+ return
721
+
722
+ t = data_type
723
+ if t in ['tac02_pose', 'pose', 'all']:
724
+ process_tac02_pose()
725
+ if t in ['xela_pose', 'pose', 'all']:
726
+ process_xela_pose()
727
+ if t in ['tac02_force', 'force', 'all']:
728
+ process_force_data('tac02')
729
+ if t in ['xela_force', 'force', 'all']:
730
+ process_force_data('xela')
731
+ if t in ['tacniq_gsmini', 'all']:
732
+ process_tacniq_gsmini()
733
+ if t in ['xela_9dtact', 'all']:
734
+ process_xela_9dtact()
735
+
736
+
737
+ def generate_marker_flow(data_type='all', test_only=False):
738
+ """Generate xela marker flow visualizations."""
739
+
740
+ def process_xela_pose():
741
+ data_dir = BASE_DIR / 'pose_data' / 'xela_pose_h5'
742
+ if not data_dir.exists():
743
+ return
744
+ print(f"\nGenerating xela_pose marker flow...")
745
+ episode_dirs = list(data_dir.iterdir())
746
+ if test_only:
747
+ episode_dirs = episode_dirs[:1]
748
+
749
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_pose"):
750
+ json_path = episode_dir / 'metadata.json'
751
+ if not json_path.exists():
752
+ continue
753
+
754
+ # Create the marker_flow subfolder
755
+ flow_dir = episode_dir / 'marker_flow'
756
+ flow_dir.mkdir(parents=True, exist_ok=True)
757
+
758
+ with open(json_path, 'r') as f:
759
+ records = json.load(f)
760
+
761
+ for record in (records[:1] if test_only else records):
762
+ if 'xela' not in record or record['xela'] is None:
763
+ continue
764
+ frame_idx = record['frame_idx']
765
+ xela = record['xela']
766
+
767
+ if isinstance(xela[0], list):
768
+ for s_idx, sample in enumerate(xela):
769
+ output_path = flow_dir / f"flow_f{frame_idx:04d}_s{s_idx:02d}.png"
770
+ save_xela_marker_flow(sample, output_path)
771
+ if test_only:
772
+ print(f" Generated {len(xela)} marker flow images")
773
+ return
774
+ else:
775
+ output_path = flow_dir / f"flow_{frame_idx:04d}.png"
776
+ save_xela_marker_flow(xela, output_path)
777
+ if test_only:
778
+ print(f" Generated: {output_path}")
779
+ return
780
+
781
+ def process_xela_force():
782
+ force_dir = BASE_DIR / 'force_data'
783
+ if not force_dir.exists():
784
+ return
785
+
786
+ for subset_dir in force_dir.iterdir():
787
+ if not subset_dir.is_dir() or 'xela' not in subset_dir.name:
788
+ continue
789
+
790
+ print(f"\nGenerating {subset_dir.name} marker flow...")
791
+ episode_dirs = list(subset_dir.iterdir())
792
+ if test_only:
793
+ episode_dirs = episode_dirs[:1]
794
+
795
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc=subset_dir.name):
796
+ json_path = episode_dir / 'metadata.json'
797
+ if not json_path.exists():
798
+ continue
799
+
800
+ flow_dir = episode_dir / 'marker_flow'
801
+ flow_dir.mkdir(parents=True, exist_ok=True)
802
+
803
+ with open(json_path, 'r') as f:
804
+ records = json.load(f)
805
+
806
+ for record in (records[:1] if test_only else records):
807
+ if 'xela' not in record or record['xela'] is None:
808
+ continue
809
+ frame_idx = record['frame_idx']
810
+ output_path = flow_dir / f"flow_{frame_idx:04d}.png"
811
+ save_xela_marker_flow(record['xela'], output_path)
812
+ if test_only:
813
+ print(f" Generated: {output_path}")
814
+ return
815
+
816
+ def process_xela_9dtact():
817
+ data_dir = BASE_DIR / 'xela_9dtact'
818
+ if not data_dir.exists():
819
+ return
820
+ print(f"\nGenerating xela_9dtact marker flow...")
821
+ episode_dirs = list(data_dir.iterdir())
822
+ if test_only:
823
+ episode_dirs = episode_dirs[:1]
824
+
825
+ for episode_dir in tqdm([d for d in episode_dirs if d.is_dir()], desc="xela_9dtact"):
826
+ json_path = episode_dir / 'metadata.json'
827
+ if not json_path.exists():
828
+ continue
829
+
830
+ # marker_flow goes inside the xela subfolder
831
+ flow_dir = episode_dir / 'xela' / 'marker_flow'
832
+ flow_dir.mkdir(parents=True, exist_ok=True)
833
+
834
+ with open(json_path, 'r') as f:
835
+ records = json.load(f)
836
+
837
+ for record in (records[:1] if test_only else records):
838
+ if 'xela' not in record or record['xela'] is None:
839
+ continue
840
+ frame_idx = record['frame_idx']
841
+ output_path = flow_dir / f"flow_{frame_idx:04d}.png"
842
+ save_xela_marker_flow(record['xela'], output_path)
843
+ if test_only:
844
+ print(f" Generated: {output_path}")
845
+ return
846
+
847
+ t = data_type
848
+ if t in ['xela_pose', 'pose', 'all']:
849
+ process_xela_pose()
850
+ if t in ['xela_force', 'force', 'all']:
851
+ process_xela_force()
852
+ if t in ['xela_9dtact', 'all']:
853
+ process_xela_9dtact()
854
+
855
+
856
+ # ============================================================
857
+ # Video generation
858
+ # ============================================================
859
+
860
+ def create_video_from_images(episode_dir, output_path, image_patterns=None,
861
+ subdir=None, fps_fallback=10, multi_sample=False,
862
+ sample_pattern=None):
863
+ """Create a video from an image sequence."""
864
+ json_path = episode_dir / 'metadata.json'
865
+ if not json_path.exists():
866
+ return False
867
+
868
+ with open(json_path, 'r') as f:
869
+ records = json.load(f)
870
+
871
+ if not records:
872
+ return False
873
+
874
+ img_dir = episode_dir / subdir if subdir else episode_dir
875
+
876
+ if multi_sample and sample_pattern:
877
+ all_frames = []
878
+ timestamps = []
879
+ for record in records:
880
+ frame_idx = record.get('frame_idx', len(timestamps))
881
+ timestamp = (record.get('sensor_timestamps') or
882
+ record.get('force_timestamps') or
883
+ record.get('timestamp'))
884
+ timestamps.append({'frame_idx': frame_idx, 'timestamp': timestamp})
885
+
886
+ timestamps.sort(key=lambda x: x['frame_idx'])
887
+
888
+ for i, ts_info in enumerate(timestamps):
889
+ frame_idx = ts_info['frame_idx']
890
+ sample_files = []
891
+ for sample_idx in range(100):
892
+ try:
893
+ filename = sample_pattern.format(idx=frame_idx, sample=sample_idx)
894
+ candidate = img_dir / filename
895
+ if candidate.exists():
896
+ sample_files.append(candidate)
897
+ else:
898
+ break
899
+ except (KeyError, ValueError):
900
+ break
901
+
902
+ if not sample_files:
903
+ continue
904
+
905
+ if i < len(timestamps) - 1 and ts_info['timestamp'] and timestamps[i+1]['timestamp']:
906
+ frame_duration = max(0.01, min(2.0, timestamps[i+1]['timestamp'] - ts_info['timestamp']))
907
+ else:
908
+ frame_duration = 1.0 / fps_fallback
909
+
910
+ sample_duration = frame_duration / len(sample_files)
911
+ for sample_file in sample_files:
912
+ all_frames.append({'path': sample_file, 'duration': sample_duration})
913
+
914
+ if len(all_frames) < 2:
915
+ return False
916
+
917
+ # Put the concat file in the episode directory and use relative paths
918
+ concat_file = str(episode_dir / '_concat.txt')
919
+ with open(concat_file, 'w') as f:
920
+ for frame in all_frames:
921
+ # Use paths relative to episode_dir
922
+ rel_path = frame['path'].relative_to(episode_dir)
923
+ f.write(f"file '{rel_path}'\nduration {frame['duration']:.6f}\n")
924
+ rel_path = all_frames[-1]['path'].relative_to(episode_dir)
925
+ f.write(f"file '{rel_path}'\n")
926
+ else:
927
+ if image_patterns is None:
928
+ image_patterns = ["gelsight_{idx:04d}.png", "xela_{idx:04d}.png", "tac02_{idx:04d}.png"]
929
+
930
+ frames = []
931
+ for record in records:
932
+ frame_idx = record.get('frame_idx', len(frames))
933
+ image_file = None
934
+
935
+ for field in ['file_name', 'gsmini_image', 'dtact_image']:
936
+ if field in record and record[field]:
937
+ img_path = record[field].split('/')[-1]
938
+ candidate = img_dir / img_path
939
+ if candidate.exists():
940
+ image_file = candidate
941
+ break
942
+
943
+ if not image_file:
944
+ for pattern in image_patterns:
945
+ try:
946
+ candidate = img_dir / pattern.format(idx=frame_idx)
947
+ if candidate.exists():
948
+ image_file = candidate
949
+ break
950
+ except Exception:
951
+ continue
952
+
953
+ if not image_file and subdir:
954
+ for pattern in [f"frame_{frame_idx:04d}.png", f"heatmap_{frame_idx:04d}.png"]:
955
+ candidate = img_dir / pattern
956
+ if candidate.exists():
957
+ image_file = candidate
958
+ break
959
+
960
+ if image_file:
961
+ timestamp = (record.get('sensor_timestamps') or
962
+ record.get('force_timestamps') or
963
+ record.get('timestamp'))
964
+ frames.append({'path': image_file, 'timestamp': timestamp, 'frame_idx': frame_idx})
965
+
966
+ if len(frames) < 2:
967
+ return False
968
+
969
+ frames.sort(key=lambda x: x['frame_idx'])
970
+
971
+ # Put the concat file in the episode directory and use relative paths
972
+ concat_file = str(episode_dir / '_concat.txt')
973
+ with open(concat_file, 'w') as f:
974
+ for i, frame in enumerate(frames):
975
+ if i < len(frames) - 1 and frame['timestamp'] and frames[i+1]['timestamp']:
976
+ duration = max(0.01, min(1.0, frames[i+1]['timestamp'] - frame['timestamp']))
977
+ else:
978
+ duration = 1.0 / fps_fallback
979
+ # Use paths relative to episode_dir
980
+ rel_path = frame['path'].relative_to(episode_dir)
981
+ f.write(f"file '{rel_path}'\nduration {duration:.6f}\n")
982
+ rel_path = frames[-1]['path'].relative_to(episode_dir)
983
+ f.write(f"file '{rel_path}'\n")
984
+
985
+ # scale ensures width and height are even (required by libx264)
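+ # The file/duration list written above drives ffmpeg's concat demuxer, so per-frame durations follow the recorded timestamps (falling back to a fixed fps).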
986
+ cmd = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', concat_file,
987
+ '-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2',
988
+ '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-crf', '23', output_path]
989
+
990
+ try:
991
+ result = subprocess.run(cmd, capture_output=True, text=True)
992
+ return result.returncode == 0
993
+ except FileNotFoundError:
994
+ print(" Error: ffmpeg is not installed")
995
+ return False
996
+ finally:
997
+ Path(concat_file).unlink(missing_ok=True)
998
+
999
+
1000
+ def generate_videos(data_type='all', test_only=False):
1001
+ """Generate videos."""
1002
+
1003
+ def process(data_path, name, **kwargs):
1004
+ data_dir = BASE_DIR / data_path
1005
+ if not data_dir.exists():
1006
+ print(f"{data_path} does not exist")
1007
+ return
1008
+
1009
+ print(f"\nProcessing {name}...")
1010
+ episode_dirs = sorted([d for d in data_dir.iterdir() if d.is_dir()],
1011
+ key=lambda x: int(x.name.split('_')[-1]))
1012
+ if test_only:
1013
+ episode_dirs = episode_dirs[:1]
1014
+
1015
+ video_name = kwargs.pop('video_name', 'video.mp4')
1016
+ success = 0
1017
+ for episode_dir in tqdm(episode_dirs, desc=name):
1018
+ if create_video_from_images(episode_dir, str(episode_dir / video_name), **kwargs):
1019
+ success += 1
1020
+ if test_only:
1021
+ print(f" Generated: {episode_dir / video_name}")
1022
+ print(f" Succeeded: {success}/{len(episode_dirs)}")
1023
+
1024
+ t = data_type
1025
+
1026
+ # force_data
1027
+ if t in ['9dtact_force', 'all']:
1028
+ process('force_data/9dtact_force_h5', '9dtact_force', image_patterns=["gelsight_{idx:04d}.png"])
1029
+ if t in ['xela_force', 'all']:
1030
+ process('force_data/xela_force_h5', 'xela_force', image_patterns=["xela_{idx:04d}.png"])
1031
+ if t in ['gelsight_force', 'all']:
1032
+ process('force_data/gelsight_force_h5', 'gelsight_force', image_patterns=["gelsight_{idx:04d}.png"])
1033
+ if t in ['tac02_force', 'all']:
1034
+ process('force_data/tac02_force_h5', 'tac02_force', image_patterns=["tac02_{idx:04d}.png"])
1035
+
1036
+ # pose_data
1037
+ if t in ['gelsight_pose', 'all']:
1038
+ process('pose_data/gelsight_pose_h5', 'gelsight_pose', multi_sample=True, sample_pattern="images_f{idx:04d}_s{sample}.png")
1039
+ if t in ['9dtact_pose', 'all']:
1040
+ process('pose_data/9dtact_pose_h5', '9dtact_pose', multi_sample=True, sample_pattern="images_f{idx:04d}_s{sample}.png")
1041
+ if t in ['tac02_pose', 'all']:
1042
+ process('pose_data/tac02_pose_h5', 'tac02_pose', multi_sample=True, sample_pattern="tactile_f{idx:04d}_s{sample:02d}.png")
1043
+ if t in ['xela_pose', 'all']:
1044
+ process('pose_data/xela_pose_h5', 'xela_pose', multi_sample=True, sample_pattern="xela_f{idx:04d}_s{sample:02d}.png")
1045
+
1046
+ # marker_flow videos
1047
+ if t in ['xela_pose_flow', 'all']:
1048
+ process('pose_data/xela_pose_h5', 'xela_pose (marker_flow)', subdir='marker_flow',
1049
+ multi_sample=True, sample_pattern="flow_f{idx:04d}_s{sample:02d}.png", video_name="video_flow.mp4")
1050
+ if t in ['xela_force_flow', 'all']:
1051
+ process('force_data/xela_force_h5', 'xela_force (marker_flow)', subdir='marker_flow',
1052
+ image_patterns=["flow_{idx:04d}.png"], video_name="video_flow.mp4")
1053
+ if t in ['xela_9dtact_flow', 'all']:
1054
+ process('xela_9dtact', 'xela_9dtact (marker_flow)', subdir='xela/marker_flow',
1055
+ image_patterns=["flow_{idx:04d}.png"], video_name="video_flow.mp4")
1056
+
1057
+ # Dual-sensor datasets
1058
+ if t in ['tacniq_gsmini', 'all']:
1059
+ process('tacniq_gsmini', 'tacniq (gsmini)', subdir='gsmini', image_patterns=["frame_{idx:04d}.png"], video_name="video_gsmini.mp4")
1060
+ process('tacniq_gsmini', 'tacniq (tacniq)', subdir='tacniq', image_patterns=["heatmap_{idx:04d}.png"], video_name="video_tacniq.mp4")
1061
+ if t in ['xela_9dtact', 'all']:
1062
+ process('xela_9dtact', 'xela_9dtact (9dtact)', subdir='9dtact', image_patterns=["frame_{idx:04d}.png"], video_name="video_9dtact.mp4")
1063
+ process('xela_9dtact', 'xela_9dtact (xela)', subdir='xela', image_patterns=["heatmap_{idx:04d}.png"], video_name="video_xela.mp4")
1064
+
1065
+
1066
+ # ============================================================
1067
+ # Packing image sequences
1068
+ # ============================================================
1069
+
1070
+ def pack_images(delete_originals=False):
1071
+ """
1072
+ Pack each episode's image sequence into a tar file (WebDataset format).
1073
+ This reduces the file count and makes uploading to Hugging Face easier.
1074
+ """
1075
+ import tarfile
1076
+
1077
+ data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']
1078
+
1079
+ for folder_name in data_folders:
1080
+ folder = BASE_DIR / folder_name
1081
+ if not folder.exists():
1082
+ continue
1083
+
1084
+ # Find all episode directories
1085
+ episode_dirs = []
1086
+ for p in folder.rglob('metadata.json'):
1087
+ episode_dirs.append(p.parent)
1088
+
1089
+ print(f"\nPacking {folder_name}: {len(episode_dirs)} episodes")
1090
+
1091
+ for episode_dir in tqdm(episode_dirs, desc=folder_name):
1092
+ # Collect all image files
1093
+ image_files = list(episode_dir.glob('*.png'))
1094
+
1095
+ # Check for images in subfolders
1096
+ for subdir in ['gsmini', '9dtact', 'tacniq', 'xela', 'marker_flow']:
1097
+ subpath = episode_dir / subdir
1098
+ if subpath.exists():
1099
+ image_files.extend(subpath.glob('*.png'))
1100
+ # Nested subfolders
1101
+ for nested in subpath.iterdir():
1102
+ if nested.is_dir():
1103
+ image_files.extend(nested.glob('*.png'))
1104
+
1105
+ if not image_files:
1106
+ continue
1107
+
1108
+ # Create the tar file
1109
+ tar_path = episode_dir / 'images.tar'
1110
+ with tarfile.open(tar_path, 'w') as tar:
1111
+ for img_path in image_files:
1112
+ # Use the relative path as the filename inside the tar
1113
+ arcname = str(img_path.relative_to(episode_dir))
1114
+ tar.add(img_path, arcname=arcname)
1115
+
1116
+ # Delete the original image files
1117
+ if delete_originals:
1118
+ for img_path in image_files:
1119
+ img_path.unlink()
1120
+ # Remove empty subfolders
1121
+ for subdir in ['gsmini', '9dtact', 'tacniq', 'xela', 'marker_flow']:
1122
+ subpath = episode_dir / subdir
1123
+ if subpath.exists():
1124
+ for nested in subpath.iterdir():
1125
+ if nested.is_dir() and not any(nested.iterdir()):
1126
+ nested.rmdir()
1127
+ if not any(subpath.iterdir()):
1128
+ subpath.rmdir()
1129
+
1130
+ print("\nPacking complete!")
1131
+ if delete_originals:
1132
+ print("Original image files deleted")
1133
+
1134
+
1135
+ def unpack_images(delete_tar=False):
1136
+ """
1137
+ Unpack the images from the tar files
1138
+ """
1139
+ import tarfile
1140
+
1141
+ data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']
1142
+
1143
+ for folder_name in data_folders:
1144
+ folder = BASE_DIR / folder_name
1145
+ if not folder.exists():
1146
+ continue
1147
+
1148
+ # Find all tar files
1149
+ tar_files = list(folder.rglob('images.tar'))
1150
+ if not tar_files:
1151
+ continue
1152
+
1153
+ print(f"\nUnpacking {folder_name}: {len(tar_files)} tar files")
1154
+
1155
+ for tar_path in tqdm(tar_files, desc=folder_name):
1156
+ episode_dir = tar_path.parent
1157
+
1158
+ try:
1159
+ with tarfile.open(tar_path, 'r') as tar:
1160
+ tar.extractall(path=episode_dir)
1161
+
1162
+ if delete_tar:
1163
+ tar_path.unlink()
1164
+ except Exception as e:
1165
+ print(f"\n Failed to unpack {tar_path}: {e}")
1166
+
1167
+ print("\nUnpacking complete!")
1168
+ if delete_tar:
1169
+ print("tar files deleted")
1170
+
1171
+
1172
+ def clean_images():
1173
+ """Delete all PNG images, keeping only videos and metadata."""
1174
+ data_folders = ['pose_data', 'force_data', 'tacniq_gsmini', 'xela_9dtact']
1175
+
1176
+ total_deleted = 0
1177
+ for folder_name in data_folders:
1178
+ folder = BASE_DIR / folder_name
1179
+ if not folder.exists():
1180
+ continue
1181
+
1182
+ png_files = list(folder.rglob('*.png'))
1183
+ print(f"{folder_name}: {len(png_files)} PNG files")
1184
+
1185
+ for png_path in tqdm(png_files, desc=f"Deleting {folder_name}"):
1186
+ png_path.unlink()
1187
+ total_deleted += 1
1188
+
1189
+ # Remove empty folders
1190
+ for folder_name in data_folders:
1191
+ folder = BASE_DIR / folder_name
1192
+ if not folder.exists():
1193
+ continue
1194
+ for subdir in folder.rglob('*'):
1195
+ if subdir.is_dir() and not any(subdir.iterdir()):
1196
+ subdir.rmdir()
1197
+
1198
+ print(f"\nCleanup complete! Deleted {total_deleted} files in total")
1199
+
1200
+
1201
+ # ============================================================
1202
+ # Upload
1203
+ # ============================================================
1204
+
1205
+ def upload_to_hf(sync=False):
1206
+ """Upload to Hugging Face
1207
+
1208
+ Args:
1209
+ sync: if True, delete files that exist on the remote but not locally
1210
+ """
1211
+ from huggingface_hub import HfApi
1212
+
1213
+ api = HfApi()
1214
+
1215
+ if sync:
1216
+ # Full sync mode: delete extra remote files
1217
+ api.upload_large_folder(
1218
+ repo_id="BorisGuo/pair_touch_13m",
1219
+ repo_type="dataset",
1220
+ folder_path=str(BASE_DIR),
1221
+ ignore_patterns=["__pycache__/**", "*.h5"],
1222
+ delete_patterns=["*"], # delete files that exist on the remote but not locally
1223
+ )
1224
+ else:
1225
+ # Normal mode: only upload/update, never delete
1226
+ api.upload_large_folder(
1227
+ repo_id="BorisGuo/pair_touch_13m",
1228
+ repo_type="dataset",
1229
+ folder_path=str(BASE_DIR),
1230
+ ignore_patterns=["__pycache__/**", "*.h5"],
1231
+ )
1232
+ print("Upload complete!")
1233
+
1234
+
1235
+ # ============================================================
1236
+ # Main entry point
1237
+ # ============================================================
1238
+
1239
+ def main():
1240
+ parser = argparse.ArgumentParser(description="Dataset preprocessing")
1241
+ subparsers = parser.add_subparsers(dest='command', help='command')
1242
+
1243
+ # extract
1244
+ extract_parser = subparsers.add_parser('extract', help='parse H5 files')
1245
+ extract_parser.add_argument('--check', action='store_true', help='only inspect the structure')
1246
+ extract_parser.add_argument('--update', action='store_true', help='only update metadata')
1247
+
1248
+ # heatmap
1249
+ heatmap_parser = subparsers.add_parser('heatmap', help='generate heatmaps')
1250
+ heatmap_parser.add_argument('--test', action='store_true', help='test mode')
1251
+ heatmap_parser.add_argument('--type', default='all', help='data type')
1252
+
1253
+ # marker_flow
1254
+ flow_parser = subparsers.add_parser('marker_flow', help='generate xela marker flow visualizations')
1255
+ flow_parser.add_argument('--test', action='store_true', help='test mode')
1256
+ flow_parser.add_argument('--type', default='all',
1257
+ choices=['xela_pose', 'xela_force', 'xela_9dtact', 'pose', 'force', 'all'],
1258
+ help='data type')
1259
+
1260
+ # video
1261
+ video_parser = subparsers.add_parser('video', help='generate videos')
1262
+ video_parser.add_argument('--test', action='store_true', help='test mode')
1263
+ video_parser.add_argument('--type', default='all', help='data type')
1264
+
1265
+ # pack
1266
+ pack_parser = subparsers.add_parser('pack', help='pack image sequences into tar files')
1267
+ pack_parser.add_argument('--delete', action='store_true', help='delete the original images after packing')
1268
+
1269
+ # unpack
1270
+ unpack_parser = subparsers.add_parser('unpack', help='unpack images from tar files')
1271
+ unpack_parser.add_argument('--delete', action='store_true', help='delete the tar files after unpacking')
1272
+
1273
+ # clean
1274
+ subparsers.add_parser('clean', help='delete all PNG images, keep only videos')
1275
+
1276
+ # upload
1277
+ upload_parser = subparsers.add_parser('upload', help='upload to Hugging Face')
1278
+ upload_parser.add_argument('--sync', action='store_true',
1279
+ help='sync mode: delete files that exist on the remote but not locally')
1280
+
1281
+ # all
1282
+ subparsers.add_parser('all', help='full pipeline')
1283
+
1284
+ args = parser.parse_args()
1285
+
1286
+ if args.command == 'extract':
1287
+ if args.check:
1288
+ check_h5_structure()
1289
+ elif args.update:
1290
+ update_metadata()
1291
+ else:
1292
+ extract_all()
1293
+ elif args.command == 'heatmap':
1294
+ print("Generating heatmaps...")
1295
+ generate_heatmaps(args.type, args.test)
1296
+ print("\nDone!")
1297
+ elif args.command == 'marker_flow':
1298
+ print("Generating marker flow...")
1299
+ generate_marker_flow(args.type, args.test)
1300
+ print("\nDone!")
1301
+ elif args.command == 'video':
1302
+ print("Generating videos...")
1303
+ generate_videos(args.type, args.test)
1304
+ print("\nDone!")
1305
+ elif args.command == 'pack':
1306
+ print("Packing image sequences...")
1307
+ pack_images(delete_originals=args.delete)
1308
+ elif args.command == 'unpack':
1309
+ print("Unpacking images...")
1310
+ unpack_images(delete_tar=args.delete)
1311
+ elif args.command == 'clean':
1312
+ print("Cleaning up image files...")
1313
+ clean_images()
1314
+ elif args.command == 'upload':
1315
+ upload_to_hf(sync=args.sync)
1316
+ elif args.command == 'all':
1317
+ print("="*60 + "\nFull pipeline\n" + "="*60)
1318
+ print("\n[1/4] Parsing H5 files...")
1319
+ extract_all()
1320
+ print("\n[2/4] Generating heatmaps...")
1321
+ generate_heatmaps('all', False)
1322
+ print("\n[3/4] Generating videos...")
1323
+ generate_videos('all', False)
1324
+ print("\n[4/4] Updating metadata...")
1325
+ update_metadata()
1326
+ print("\n" + "="*60 + "\nDone!\n" + "="*60)
1327
+ else:
1328
+ parser.print_help()
1329
+
1330
+
1331
+ if __name__ == "__main__":
1332
+ main()
1333
+