yijiangg commited on
Commit
cf0cd85
·
verified ·
1 Parent(s): 60a7180

Upload convert_libero_data_to_lerobot.py

Browse files
Files changed (1) hide show
  1. convert_libero_data_to_lerobot.py +96 -0
convert_libero_data_to_lerobot.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ git clone https://github.com/huggingface/lerobot.git
3
+ cd lerobot
4
+ pip install -e .
5
+
6
+ pip install tensorflow tensorflow_datasets
7
+
8
+ git clone https://huggingface.co/datasets/openvla/modified_libero_rlds
9
+ cd modified_libero_rlds
10
+ git lfs pull
11
+ """
12
+
13
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
14
+ import tensorflow as tf
15
+ import tensorflow_datasets as tfds
16
+
17
# Directory holding the raw RLDS shards (the cloned `modified_libero_rlds` repo).
data_dir = 'modified_libero_rlds'

# Hub repo id that the converted LeRobot dataset is created under / pushed to.
REPO_NAME = 'droid-ai/libero'

# The four LIBERO suites to convert; all share the `libero_<suite>_no_noops`
# naming pattern (no-op steps already filtered out upstream).
RAW_DATASET_NAMES = [
    f'libero_{suite}_no_noops'
    for suite in ('10', 'goal', 'object', 'spatial')
]
25
+
26
# Per-frame schema for the output dataset. Scalar step metadata (flags,
# discount, reward) is stored as shape-(1,) arrays; both cameras are
# 256x256 RGB. Insertion order matches the original feature declaration.
_FEATURES = {}
for _flag in ('is_first', 'is_last', 'is_terminal'):
    _FEATURES[_flag] = {'dtype': 'bool', 'shape': (1,)}
for _camera in ('image', 'wrist_image'):
    _FEATURES[_camera] = {'dtype': 'image', 'shape': (256, 256, 3)}
# End-effector state is 8-D, joint state and actions are 7-D float vectors.
_FEATURES['state'] = {'dtype': 'float32', 'shape': (8,)}
_FEATURES['joint_state'] = {'dtype': 'float32', 'shape': (7,)}
_FEATURES['actions'] = {'dtype': 'float32', 'shape': (7,)}
for _scalar in ('discount', 'reward'):
    _FEATURES[_scalar] = {'dtype': 'float32', 'shape': (1,)}

# Create an empty LeRobot dataset on disk; frames are appended below.
# NOTE(review): fps=30 and the very high writer thread/process counts are
# taken verbatim from the original — confirm they match the source data
# and the conversion host before large runs.
dataset = LeRobotDataset.create(
    repo_id=REPO_NAME,
    robot_type='libero',
    fps=30,
    features=_FEATURES,
    image_writer_threads=128,
    image_writer_processes=64,
)
75
+
76
# Stream every episode of every LIBERO suite out of RLDS and append it,
# frame by frame, to the LeRobot dataset created above.
for raw_dataset_name in RAW_DATASET_NAMES:
    raw_dataset = tfds.load(raw_dataset_name, data_dir=data_dir, split='train')
    # Overlap tf.data I/O with the (slow) image-writing below.
    raw_dataset = raw_dataset.prefetch(tf.data.AUTOTUNE)

    for episode in raw_dataset:
        for step in episode['steps'].as_numpy_iterator():
            obs = step['observation']

            # `[None]` lifts each scalar to the declared shape-(1,) array.
            frame = {'task': step['language_instruction'].decode()}
            for flag in ('is_first', 'is_last', 'is_terminal'):
                frame[flag] = step[flag][None]
            frame['image'] = obs['image']
            frame['wrist_image'] = obs['wrist_image']
            frame['state'] = obs['state']
            frame['joint_state'] = obs['joint_state']
            frame['actions'] = step['action']
            frame['discount'] = step['discount'][None]
            frame['reward'] = step['reward'][None]

            dataset.add_frame(frame)

        # Flush the buffered frames as one episode before starting the next.
        dataset.save_episode()

# Upload the finished dataset; large-folder mode batches the many image files.
dataset.push_to_hub(upload_large_folder=True)