Upload folder using huggingface_hub
Browse files
.gitattributes
CHANGED
|
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
LIVING_ROOM_SCENE3_place_the_ketchup_bottle_upside_down_in_the_wooden_tray_demo_1752513943.hdf5 filter=lfs diff=lfs merge=lfs -text
|
LIVING_ROOM_SCENE3_place_the_ketchup_bottle_upside_down_in_the_wooden_tray_demo_1752513943.hdf5
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a3441b8dad0253843c7b63a24ed5aec27080d56518063c53c54582810881c9fc
|
| 3 |
+
size 1768073482
|
stat.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# + Group: data/demo_8
|
| 2 |
+
# - Dataset: data/demo_8/actions, shape: (79, 7), dtype: float64
|
| 3 |
+
# - Dataset: data/demo_8/dones, shape: (79,), dtype: uint8
|
| 4 |
+
# + Group: data/demo_8/obs
|
| 5 |
+
# - Dataset: data/demo_8/obs/agentview_rgb, shape: (79, 128, 128, 3), dtype: uint8
|
| 6 |
+
# - Dataset: data/demo_8/obs/ee_ori, shape: (79, 3), dtype: float64
|
| 7 |
+
# - Dataset: data/demo_8/obs/ee_pos, shape: (79, 3), dtype: float64
|
| 8 |
+
# - Dataset: data/demo_8/obs/ee_states, shape: (79, 6), dtype: float64
|
| 9 |
+
# - Dataset: data/demo_8/obs/eye_in_hand_rgb, shape: (79, 128, 128, 3), dtype: uint8
|
| 10 |
+
# - Dataset: data/demo_8/obs/gripper_states, shape: (79, 2), dtype: float64
|
| 11 |
+
# - Dataset: data/demo_8/obs/joint_states, shape: (79, 7), dtype: float64
|
| 12 |
+
# - Dataset: data/demo_8/rewards, shape: (79,), dtype: uint8
|
| 13 |
+
# - Dataset: data/demo_8/robot_states, shape: (79, 9), dtype: float64
|
| 14 |
+
# - Dataset: data/demo_8/states, shape: (79, 92), dtype: float64
|
| 15 |
+
# + Group: data/demo_9
|
| 16 |
+
# - Dataset: data/demo_9/actions, shape: (89, 7), dtype: float64
|
| 17 |
+
# - Dataset: data/demo_9/dones, shape: (89,), dtype: uint8
|
| 18 |
+
# + Group: data/demo_9/obs
|
| 19 |
+
# - Dataset: data/demo_9/obs/agentview_rgb, shape: (89, 128, 128, 3), dtype: uint8
|
| 20 |
+
# - Dataset: data/demo_9/obs/ee_ori, shape: (89, 3), dtype: float64
|
| 21 |
+
# - Dataset: data/demo_9/obs/ee_pos, shape: (89, 3), dtype: float64
|
| 22 |
+
# - Dataset: data/demo_9/obs/ee_states, shape: (89, 6), dtype: float64
|
| 23 |
+
# - Dataset: data/demo_9/obs/eye_in_hand_rgb, shape: (89, 128, 128, 3), dtype: uint8
|
| 24 |
+
# - Dataset: data/demo_9/obs/gripper_states, shape: (89, 2), dtype: float64
|
| 25 |
+
# - Dataset: data/demo_9/obs/joint_states, shape: (89, 7), dtype: float64
|
| 26 |
+
# - Dataset: data/demo_9/rewards, shape: (89,), dtype: uint8
|
| 27 |
+
# - Dataset: data/demo_9/robot_states, shape: (89, 9), dtype: float64
|
| 28 |
+
# - Dataset: data/demo_9/states, shape: (89, 92), dtype: float64
|
| 29 |
+
|
| 30 |
+
# The above is the structure of the HDF5 file. Read all the HDF5 files in the directory, and calculate the mean, std, min, max, q01, q99 of the actions, obs/ee_states, gripper_states, joint_states of all the files.
|
| 31 |
+
|
| 32 |
+
import h5py
|
| 33 |
+
import numpy as np
|
| 34 |
+
import os
|
| 35 |
+
|
| 36 |
+
def calculate_statistics(hdf5_path):
    """Compute per-dimension summary statistics for one HDF5 demo file.

    The file is expected to be laid out as ``data/<demo>/actions`` and
    ``data/<demo>/obs/{ee_states,gripper_states,joint_states}`` (see the
    structure dump at the top of this script). All demos in the file are
    concatenated along the time axis before the statistics are taken.

    Parameters
    ----------
    hdf5_path : str
        Path to a single .hdf5 demonstration file.

    Returns
    -------
    dict
        Maps 'actions', 'ee_states', 'gripper_states', 'joint_states' to a
        dict with 'mean', 'std', 'min', 'max', 'q01', 'q99' arrays, each
        computed per dimension (axis=0 over all concatenated timesteps).
    """
    # Output key -> dataset path relative to each demo group. Driving the
    # collection and the stats from this table replaces four copy-pasted
    # pipelines in the original.
    dataset_paths = {
        'actions': 'actions',
        'ee_states': 'obs/ee_states',
        'gripper_states': 'obs/gripper_states',
        'joint_states': 'obs/joint_states',
    }
    collected = {key: [] for key in dataset_paths}

    with h5py.File(hdf5_path, 'r') as f:
        for demo in f['data']:
            for key, rel_path in dataset_paths.items():
                # [:] materializes the dataset as a numpy array while the
                # file is still open.
                collected[key].append(f[f'data/{demo}/{rel_path}'][:])

    stats = {}
    for key, chunks in collected.items():
        data = np.concatenate(chunks, axis=0)
        stats[key] = {
            'mean': np.mean(data, axis=0),
            'std': np.std(data, axis=0),
            'min': np.min(data, axis=0),
            'max': np.max(data, axis=0),
            'q01': np.percentile(data, 1, axis=0),
            'q99': np.percentile(data, 99, axis=0),
        }
    return stats
|
| 87 |
+
|
| 88 |
+
def process_directory(directory):
    """Aggregate per-file statistics over every ``*.hdf5`` file in *directory*.

    For each key ('actions', 'ee_states', 'gripper_states', 'joint_states'):
      - 'mean', 'std', 'q01', 'q99': unweighted average of each file's value,
      - 'min', 'max': elementwise extremes across files.

    NOTE(review): averaging per-file std and percentiles only *approximates*
    the pooled statistics over all samples — files with fewer frames get the
    same weight as larger ones, and a mean of stds is not the pooled std.
    This is usually acceptable for normalization bounds, but it is not an
    exact pooled estimate; computing the exact values would require pooling
    the raw data across files.

    Parameters
    ----------
    directory : str
        Directory scanned (non-recursively) for .hdf5 files.

    Raises
    ------
    ValueError
        If the directory contains no .hdf5 files (previously this fell
        through to numpy reductions over empty arrays with an obscure error).
    """
    stat_names = ('mean', 'std', 'min', 'max', 'q01', 'q99')
    all_stats = {
        'actions': [],
        'ee_states': [],
        'gripper_states': [],
        'joint_states': [],
    }

    for filename in os.listdir(directory):
        if filename.endswith('.hdf5'):
            hdf5_path = os.path.join(directory, filename)
            stats = calculate_statistics(hdf5_path)
            for key in all_stats:
                all_stats[key].append(stats[key])

    # Fail loudly on an empty directory instead of letting np.min/np.mean
    # choke on empty arrays below.
    if not all_stats['actions']:
        raise ValueError(f"No .hdf5 files found in {directory!r}")

    # Combine the per-file statistics into one overall summary.
    overall_stats = {}
    for key, values in all_stats.items():
        # values: list (one entry per file) of {stat_name: per-dim array}.
        stacked = {
            name: np.array([v[name] for v in values]) for name in stat_names
        }
        overall_stats[key] = {
            'mean': np.mean(stacked['mean'], axis=0),
            'std': np.mean(stacked['std'], axis=0),   # approximation, see docstring
            'min': np.min(stacked['min'], axis=0),
            'max': np.max(stacked['max'], axis=0),
            'q01': np.mean(stacked['q01'], axis=0),   # approximation, see docstring
            'q99': np.mean(stacked['q99'], axis=0),   # approximation, see docstring
        }
    return overall_stats
|
| 124 |
+
|
| 125 |
+
if __name__ == "__main__":
    # Hard-coded dataset location; run this file as a script to dump the
    # aggregated statistics for every .hdf5 file in the directory.
    directory = '/home2/czhang/datasets/LIBERO/libero_spatial'
    stats = process_directory(directory)
    for group_name, group_stats in stats.items():
        print(f"{group_name}:")
        for stat_name, stat_value in group_stats.items():
            print(f" {stat_name}: {stat_value}")
    print("Statistics calculated successfully.")
|