BorisGuo committed
Commit 9d45e61 · verified · Parent: 23af9e0

Add files using upload-large-folder tool

Files changed (1)
  extract_h5.py  +71 -47
extract_h5.py CHANGED
@@ -1,30 +1,61 @@
#!/usr/bin/env python3
"""
Parse H5 files and export them in a Hugging Face Dataset Viewer compatible format
-Expand frame by frame, one row per frame
+
+Usage:
+    python extract_h5.py          # parse all H5 files
+    python extract_h5.py --check  # only inspect the H5 file structure (no extraction)
"""

+import argparse
import h5py
import numpy as np
from pathlib import Path
from PIL import Image
from tqdm import tqdm
import json
-import matplotlib
-matplotlib.use('Agg')  # headless backend
-import matplotlib.pyplot as plt
+from collections import defaultdict


-def save_heatmap(data, output_path, cmap='viridis'):
-    """Save a matrix as a heatmap PNG"""
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(data, cmap=cmap, aspect='auto')
-    plt.colorbar(im, ax=ax)
-    ax.set_xlabel('Column')
-    ax.set_ylabel('Row')
-    plt.tight_layout()
-    plt.savefig(output_path, dpi=100)
-    plt.close(fig)
+def check_h5_structure(base_dir):
+    """Inspect the field structure of all H5 files"""
+    folder_keys = defaultdict(lambda: defaultdict(set))
+
+    h5_folders = [d for d in base_dir.iterdir() if d.is_dir() and d.name.endswith('_h5')]
+
+    for h5_folder in sorted(h5_folders):
+        h5_files = list(h5_folder.rglob('*.h5'))
+        print(f"\n{'='*60}")
+        print(f"Folder: {h5_folder.name} ({len(h5_files)} files)")
+        print('='*60)
+
+        # Inspect the first 3 files as samples
+        for h5_path in h5_files[:3]:
+            print(f"\n  {h5_path.name}:")
+            with h5py.File(h5_path, 'r') as f:
+                for key in sorted(f.keys()):
+                    arr = f[key]
+                    print(f"    - {key}: shape={arr.shape}, dtype={arr.dtype}")
+                    folder_keys[h5_folder.name][key].add(str(arr.shape))
+
+        # Summarize all keys in this folder
+        print(f"\n  Summary (checked all {len(h5_files)} files):")
+        for h5_path in h5_files:
+            with h5py.File(h5_path, 'r') as f:
+                for key in f.keys():
+                    folder_keys[h5_folder.name][key].add(str(f[key].shape))
+
+        for key, shapes in sorted(folder_keys[h5_folder.name].items()):
+            print(f"    - {key}: shapes={list(shapes)}")
+
+    # Overall summary
+    print(f"\n{'='*60}")
+    print("Overall summary - fields across all folders:")
+    print('='*60)
+    for folder, keys in sorted(folder_keys.items()):
+        print(f"\n{folder}:")
+        for key, shapes in sorted(keys.items()):
+            print(f"    - {key}: {list(shapes)}")


def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):
@@ -42,7 +73,7 @@ def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):

        # Preprocess all data
        data_cache = {}
-        image_paths = {}  # key -> list of paths per frame
+        image_paths = {}

        for key in keys:
            arr = f[key][:]
@@ -63,7 +94,7 @@ def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):

                elif len(arr.shape) == 5:  # multi-view (N, V, H, W, C)
                    num_views = arr.shape[1]
-                    paths = []  # multi-view paths for each frame
+                    paths = []
                    for frame_idx in range(arr.shape[0]):
                        frame_paths = []
                        for view_idx in range(num_views):
@@ -86,18 +117,18 @@ def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):
            if subset_path:
                record["subset"] = subset_path

-            # Add this frame's image paths (use file_name as the primary image column, per the HF format)
+            # Add this frame's image paths (file_name is required by the HF format)
            for key, paths in image_paths.items():
                if isinstance(paths[0], list):  # multi-view
                    for v_idx, p in enumerate(paths[frame_idx]):
                        if v_idx == 0:
-                            record["file_name"] = p  # first view as the primary image
+                            record["file_name"] = p
                        record[f"image_v{v_idx}"] = p
                else:
-                    record["file_name"] = paths[frame_idx]  # primary image
+                    record["file_name"] = paths[frame_idx]
                    record["image"] = paths[frame_idx]

-            # Add static data (e.g. background images)
+            # Add static data
            for key, val in data_cache.items():
                if key.endswith("_image") or key.endswith("_num_views"):
                    record[key] = val
@@ -110,11 +141,8 @@ def extract_pose_data(h5_path, output_dir, episode_id, subset_path=""):
            if 'translations' in data_cache:
                record["translation"] = data_cache['translations'][frame_idx]
            if 'tactile' in data_cache:
-                # tactile data goes straight into the JSON (tac02: 20x66)
                record["tactile"] = data_cache['tactile'][frame_idx]
-
            if 'xela' in data_cache:
-                # xela data goes straight into the JSON (xela: 10x72)
                record["xela"] = data_cache['xela'][frame_idx]

            record["num_frames"] = num_frames
@@ -137,22 +165,19 @@ def extract_tacniq_gsmini(h5_path, output_dir, episode_id, subset_path=""):
        gsmini = f['gsmini'][:]
        tacniq = f['tacniq'][:].tolist()

-        # Save the background image
        Image.fromarray(bg).save(episode_dir / "bg.png")
        bg_path = f"{rel_prefix}/bg.png"

        num_frames = len(gsmini)

-        # Expand frame by frame
        for frame_idx in range(num_frames):
-            # Save this frame's image
            filename = f"gsmini_{frame_idx:04d}.png"
            Image.fromarray(gsmini[frame_idx]).save(episode_dir / filename)

            record = {
                "episode_id": episode_id,
                "frame_idx": frame_idx,
-                "file_name": f"{rel_prefix}/{filename}",  # required by the HF format
+                "file_name": f"{rel_prefix}/{filename}",
                "image": f"{rel_prefix}/{filename}",
                "bg_image": bg_path,
                "tacniq": tacniq[frame_idx] if frame_idx < len(tacniq) else None,
@@ -181,22 +206,19 @@ def extract_xela_9dtact(h5_path, output_dir, episode_id, subset_path=""):
        dtact = f['9dtact'][:]
        xela = f['xela'][:].tolist()

-        # Save the background image
        Image.fromarray(bg).save(episode_dir / "bg.png")
        bg_path = f"{rel_prefix}/bg.png"

        num_frames = len(dtact)

-        # Expand frame by frame
        for frame_idx in range(num_frames):
-            # Save this frame's image
            filename = f"9dtact_{frame_idx:04d}.png"
            Image.fromarray(dtact[frame_idx]).save(episode_dir / filename)

            record = {
                "episode_id": episode_id,
                "frame_idx": frame_idx,
-                "file_name": f"{rel_prefix}/{filename}",  # required by the HF format
+                "file_name": f"{rel_prefix}/{filename}",
                "image": f"{rel_prefix}/{filename}",
                "bg_image": bg_path,
                "xela": xela[frame_idx] if frame_idx < len(xela) else None,
@@ -223,7 +245,6 @@ def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
    with h5py.File(h5_path, 'r') as f:
        keys = list(f.keys())

-        # Determine the frame count (from timestamps or image sequences)
        num_frames = 0
        data_cache = {}
        image_paths = {}
@@ -232,11 +253,11 @@ def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
            arr = f[key][:]

            if arr.dtype == np.uint8:
-                if len(arr.shape) == 3:  # background image
+                if len(arr.shape) == 3:
                    filename = f"{key}.png"
                    Image.fromarray(arr).save(episode_dir / filename)
                    data_cache[f"{key}_image"] = f"{rel_prefix}/{filename}"
-                elif len(arr.shape) == 4:  # image sequence
+                elif len(arr.shape) == 4:
                    num_frames = max(num_frames, len(arr))
                    paths = []
                    for i, img in enumerate(arr):
@@ -249,7 +270,6 @@ def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
                if len(arr.shape) >= 1:
                    num_frames = max(num_frames, len(arr))

-        # Expand frame by frame
        for frame_idx in range(num_frames):
            record = {
                "episode_id": episode_id,
@@ -260,13 +280,11 @@ def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
            if subset_path:
                record["subset"] = subset_path

-            # Add image paths
            for key, paths in image_paths.items():
                if frame_idx < len(paths):
-                    record["file_name"] = paths[frame_idx]  # required by the HF format
+                    record["file_name"] = paths[frame_idx]
                    record["image"] = paths[frame_idx]

-            # Add static data
            for key, val in data_cache.items():
                if key.endswith("_image"):
                    record[key] = val
@@ -278,25 +296,20 @@ def extract_force_data(h5_path, output_dir, episode_id, subset_path=""):
    return records


-def main():
-    base_dir = Path(__file__).parent
-
-    # Find all *_h5 folders
+def extract_all(base_dir):
+    """Parse all H5 files"""
    h5_folders = [d for d in base_dir.iterdir() if d.is_dir() and d.name.endswith('_h5')]

    for h5_folder in h5_folders:
-        # Create the matching output folder (drop the _h5 suffix)
        output_folder = base_dir / h5_folder.name.replace('_h5', '')
        output_folder.mkdir(exist_ok=True)

-        # Collect all h5 files (including subdirectories)
        h5_files = list(h5_folder.rglob('*.h5'))
        print(f"\nProcessing {h5_folder.name}: {len(h5_files)} files -> {output_folder.name}/")

        all_records = []

        for h5_path in tqdm(h5_files, desc=h5_folder.name):
-            # Compute the relative path, preserving the subdirectory structure
            relative = h5_path.relative_to(h5_folder)
            sub_output_dir = output_folder / relative.parent
            sub_output_dir.mkdir(parents=True, exist_ok=True)
@@ -318,7 +331,6 @@ def main():

            all_records.extend(records)

-            # Save this episode's JSON file
            episode_dir = sub_output_dir / episode_id
            json_path = episode_dir / "metadata.json"
            with open(json_path, 'w') as f:
@@ -327,7 +339,6 @@ def main():
        except Exception as e:
            print(f"\nError: {h5_path}: {e}")

-        # Save metadata.jsonl (Hugging Face Dataset Viewer format)
        jsonl_path = output_folder / "metadata.jsonl"
        with open(jsonl_path, 'w') as f:
            for record in all_records:
@@ -338,5 +349,18 @@ def main():
    print("\nParsing complete!")


+def main():
+    parser = argparse.ArgumentParser(description="Parse H5 files into HuggingFace Dataset format")
+    parser.add_argument('--check', action='store_true', help='only inspect the H5 file structure (no extraction)')
+    args = parser.parse_args()
+
+    base_dir = Path(__file__).parent
+
+    if args.check:
+        check_h5_structure(base_dir)
+    else:
+        extract_all(base_dir)
+
+
if __name__ == "__main__":
    main()
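
The exported folders follow the Hugging Face imagefolder layout: each output folder gets a metadata.jsonl whose required file_name column points at the per-frame PNGs, with the remaining per-frame fields carried along as extra columns. A minimal sketch of how one exported folder could be sanity-checked locally with the datasets library; the folder name gsmini_tacniq is a hypothetical placeholder for whichever <name>_h5 input folder was extracted (the output folder is the same name without the _h5 suffix):

# Sanity-check sketch (not part of extract_h5.py).
# Assumes the `datasets` library is installed and "gsmini_tacniq" is one of the
# output folders written next to the script; the imagefolder loader reads
# metadata.jsonl via its file_name column and exposes the images plus the other
# per-frame fields (episode_id, frame_idx, ...) as dataset columns.
from datasets import load_dataset

ds = load_dataset("imagefolder", data_dir="gsmini_tacniq", split="train")
print(ds)                                        # features: image + metadata columns
print(ds[0]["episode_id"], ds[0]["frame_idx"])   # first frame's metadata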