THURCSCT committed on
Commit
b516efc
·
verified ·
1 Parent(s): dc834e4

Delete folder scripts with huggingface_hub

Browse files
scripts/check_dataset_integrity.py DELETED
@@ -1,40 +0,0 @@
1
- """A script to check if any demonstration dataset does not have the exact number of demonstration trajectories"""
2
-
3
- from pathlib import Path
4
- import h5py
5
- import numpy as np
6
-
7
- from libero.libero import get_libero_path
8
-
9
- error_datasets = []
10
- for demo_file_name in Path(get_libero_path("datasets")).rglob("*hdf5"):
11
-
12
- demo_file = h5py.File(demo_file_name)
13
-
14
- count = 0
15
- for key in demo_file["data"].keys():
16
- if "demo" in key:
17
- count += 1
18
-
19
- if count == 50:
20
- traj_lengths = []
21
- action_min = np.inf
22
- action_max = -np.inf
23
- for demo_name in demo_file["data"].keys():
24
- traj_lengths.append(demo_file["data/{}/actions".format(demo_name)].shape[0])
25
- traj_lengths = np.array(traj_lengths)
26
- print(f"[info] dataset {demo_file_name} is in tact, test passed \u2714")
27
- print(np.mean(traj_lengths), " +- ", np.std(traj_lengths))
28
- if demo_file["data"].attrs["tag"] == "libero-v1":
29
- print("Version correct")
30
-
31
- print("=========================================")
32
-
33
- else:
34
- print("[error] !!!")
35
- error_datasets.append(demo_file_name)
36
-
37
- if len(error_datasets) > 0:
38
- print("[error] The following datasets are corrupted:")
39
- for dataset in error_datasets:
40
- print(dataset)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/collect_demonstration.py DELETED
@@ -1,356 +0,0 @@
1
- import argparse
2
- import cv2
3
- import datetime
4
- import h5py
5
- import init_path
6
- import json
7
- import numpy as np
8
- import os
9
- import robosuite as suite
10
- import time
11
- from glob import glob
12
- from robosuite import load_controller_config
13
- from robosuite.wrappers import DataCollectionWrapper, VisualizationWrapper
14
- from robosuite.utils.input_utils import input2action
15
-
16
-
17
- import libero.libero.envs.bddl_utils as BDDLUtils
18
- from libero.libero.envs import *
19
-
20
-
21
def collect_human_trajectory(
    env, device, arm, env_configuration, problem_info, remove_directory=None
):
    """
    Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration.
    The rollout trajectory is saved to files in npz format.
    Modify the DataCollectionWrapper wrapper to add new fields or change data formats.

    Args:
        env (MujocoEnv): environment to control
        device (Device): to receive controls from the device
        arm (str): which arm to control (eg bimanual) 'right' or 'left'
        env_configuration (str): specified environment configuration
        problem_info (dict): problem metadata (currently unused inside this function)
        remove_directory (list or None): output list; the episode directory name
            is appended when the user aborts, so the caller can skip it later.

    Returns:
        bool: True if the episode should be saved, False if it was aborted.
    """
    # Create a fresh list per call; the original used a mutable default
    # argument, which would accumulate entries across calls.
    if remove_directory is None:
        remove_directory = []

    reset_success = False
    while not reset_success:
        try:
            env.reset()
            reset_success = True
        except Exception:
            # Resets occasionally fail (e.g. invalid initial placement); retry.
            continue

    # ID = 2 always corresponds to agentview
    env.render()

    task_completion_hold_count = (
        -1
    )  # counter to collect 10 timesteps after reaching goal
    device.start_control()

    # Loop until we get a reset from the input or the task completes
    saving = True
    count = 0

    while True:
        count += 1
        # Set active robot
        active_robot = (
            env.robots[0]
            if env_configuration == "bimanual"
            else env.robots[arm == "left"]
        )

        # Get the newest action
        action, grasp = input2action(
            device=device,
            robot=active_robot,
            active_arm=arm,
            env_configuration=env_configuration,
        )

        # If action is none, then this a reset so we should break
        if action is None:
            print("Break")
            saving = False
            break

        # Run environment step
        env.step(action)
        env.render()
        # Also break if we complete the task
        if task_completion_hold_count == 0:
            break

        # state machine to check for having a success for 10 consecutive timesteps
        if env._check_success():
            if task_completion_hold_count > 0:
                task_completion_hold_count -= 1  # latched state, decrement count
            else:
                task_completion_hold_count = 10  # reset count on first success timestep
        else:
            task_completion_hold_count = -1  # null the counter if there's no success

    print(count)
    # cleanup for end of data collection episodes
    if not saving:
        remove_directory.append(env.ep_directory.split("/")[-1])
    env.close()
    return saving
102
-
103
-
104
def gather_demonstrations_as_hdf5(
    directory, out_dir, env_info, args, remove_directory=None
):
    """
    Gathers the demonstrations saved in @directory into a
    single hdf5 file.

    The strucure of the hdf5 file is as follows.

    data (group)
        date (attribute) - date of collection
        time (attribute) - time of collection
        repository_version (attribute) - repository version used during collection
        env (attribute) - environment name on which demos were collected

        demo1 (group) - every demonstration has a group
            model_file (attribute) - model xml string for demonstration
            states (dataset) - flattened mujoco states
            actions (dataset) - actions applied during demonstration

        demo2 (group)
        ...

    Args:
        directory (str): Path to the directory containing raw demonstrations.
        out_dir (str): Path to where to store the hdf5 file.
        env_info (str): JSON-encoded string containing environment information,
            including controller and robot info
        args (Namespace): parsed CLI arguments; only ``args.bddl_file`` is read.
        remove_directory (list or None): episode directory names to skip
            (aborted collections).

    NOTE(review): ``problem_info`` is read from the enclosing script scope
    rather than passed as a parameter, so this function only works when
    invoked from the collection script's ``__main__`` block.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if remove_directory is None:
        remove_directory = []

    hdf5_path = os.path.join(out_dir, "demo.hdf5")
    f = h5py.File(hdf5_path, "w")

    # store some metadata in the attributes of one group
    grp = f.create_group("data")

    num_eps = 0
    env_name = None  # will get populated at some point

    for ep_directory in os.listdir(directory):
        if ep_directory in remove_directory:
            # Episode was aborted during collection; skip it.
            continue
        state_paths = os.path.join(directory, ep_directory, "state_*.npz")
        states = []
        actions = []

        for state_file in sorted(glob(state_paths)):
            dic = np.load(state_file, allow_pickle=True)
            env_name = str(dic["env"])

            states.extend(dic["states"])
            for ai in dic["action_infos"]:
                actions.append(ai["actions"])

        if len(states) == 0:
            continue

        # Delete the last state. This is because when the DataCollector wrapper
        # recorded the states and actions, the states were recorded AFTER playing that action.
        del states[-1]
        assert len(states) == len(actions)

        num_eps += 1
        ep_data_grp = grp.create_group("demo_{}".format(num_eps))

        # store model xml as an attribute
        # Use a distinct name here: the original shadowed the open hdf5
        # handle `f`, so the output file was never properly closed.
        xml_path = os.path.join(directory, ep_directory, "model.xml")
        with open(xml_path, "r") as xml_f:
            xml_str = xml_f.read()
        ep_data_grp.attrs["model_file"] = xml_str

        # write datasets for states and actions
        ep_data_grp.create_dataset("states", data=np.array(states))
        ep_data_grp.create_dataset("actions", data=np.array(actions))

    # write dataset attributes (metadata)
    now = datetime.datetime.now()
    grp.attrs["date"] = "{}-{}-{}".format(now.month, now.day, now.year)
    grp.attrs["time"] = "{}:{}:{}".format(now.hour, now.minute, now.second)
    grp.attrs["repository_version"] = suite.__version__
    grp.attrs["env"] = env_name
    grp.attrs["env_info"] = env_info

    grp.attrs["problem_info"] = json.dumps(problem_info)
    grp.attrs["bddl_file_name"] = args.bddl_file
    # Store the BDDL file's *content*; the original wrote str(<file object>),
    # i.e. the repr of the handle, not the text (and leaked the handle).
    with open(args.bddl_file, "r", encoding="utf-8") as bddl_f:
        grp.attrs["bddl_file_content"] = bddl_f.read()

    f.close()
194
-
195
-
196
- if __name__ == "__main__":
197
- # Arguments
198
- parser = argparse.ArgumentParser()
199
- parser.add_argument(
200
- "--directory",
201
- type=str,
202
- default="demonstration_data",
203
- )
204
- parser.add_argument(
205
- "--robots",
206
- nargs="+",
207
- type=str,
208
- default="Panda",
209
- help="Which robot(s) to use in the env",
210
- )
211
- parser.add_argument(
212
- "--config",
213
- type=str,
214
- default="single-arm-opposed",
215
- help="Specified environment configuration if necessary",
216
- )
217
- parser.add_argument(
218
- "--arm",
219
- type=str,
220
- default="right",
221
- help="Which arm to control (eg bimanual) 'right' or 'left'",
222
- )
223
- parser.add_argument(
224
- "--camera",
225
- type=str,
226
- default="agentview",
227
- help="Which camera to use for collecting demos",
228
- )
229
- parser.add_argument(
230
- "--controller",
231
- type=str,
232
- default="OSC_POSE",
233
- help="Choice of controller. Can be 'IK_POSE' or 'OSC_POSE'",
234
- )
235
- parser.add_argument("--device", type=str, default="spacemouse")
236
- parser.add_argument(
237
- "--pos-sensitivity",
238
- type=float,
239
- default=1.5,
240
- help="How much to scale position user inputs",
241
- )
242
- parser.add_argument(
243
- "--rot-sensitivity",
244
- type=float,
245
- default=1.0,
246
- help="How much to scale rotation user inputs",
247
- )
248
- parser.add_argument(
249
- "--num-demonstration",
250
- type=int,
251
- default=50,
252
- help="How much to scale rotation user inputs",
253
- )
254
- parser.add_argument("--bddl-file", type=str)
255
-
256
- parser.add_argument("--vendor-id", type=int, default=9583)
257
- parser.add_argument("--product-id", type=int, default=50734)
258
-
259
- args = parser.parse_args()
260
-
261
- # Get controller config
262
- controller_config = load_controller_config(default_controller=args.controller)
263
-
264
- # Create argument configuration
265
- config = {
266
- "robots": args.robots,
267
- "controller_configs": controller_config,
268
- }
269
-
270
- assert os.path.exists(args.bddl_file)
271
- problem_info = BDDLUtils.get_problem_info(args.bddl_file)
272
- # Check if we're using a multi-armed environment and use env_configuration argument if so
273
-
274
- # Create environment
275
- problem_name = problem_info["problem_name"]
276
- domain_name = problem_info["domain_name"]
277
- language_instruction = problem_info["language_instruction"]
278
- if "TwoArm" in problem_name:
279
- config["env_configuration"] = args.config
280
- print(language_instruction)
281
- env = TASK_MAPPING[problem_name](
282
- bddl_file_name=args.bddl_file,
283
- **config,
284
- has_renderer=True,
285
- has_offscreen_renderer=False,
286
- render_camera=args.camera,
287
- ignore_done=True,
288
- use_camera_obs=False,
289
- reward_shaping=True,
290
- control_freq=20,
291
- )
292
-
293
- # Wrap this with visualization wrapper
294
- env = VisualizationWrapper(env)
295
-
296
- # Grab reference to controller config and convert it to json-encoded string
297
- env_info = json.dumps(config)
298
-
299
- # wrap the environment with data collection wrapper
300
- tmp_directory = "demonstration_data/tmp/{}_ln_{}/{}".format(
301
- problem_name,
302
- language_instruction.replace(" ", "_").strip('""'),
303
- str(time.time()).replace(".", "_"),
304
- )
305
-
306
- env = DataCollectionWrapper(env, tmp_directory)
307
-
308
- # initialize device
309
- if args.device == "keyboard":
310
- from robosuite.devices import Keyboard
311
-
312
- device = Keyboard(
313
- pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity
314
- )
315
- env.viewer.add_keypress_callback("any", device.on_press)
316
- env.viewer.add_keyup_callback("any", device.on_release)
317
- env.viewer.add_keyrepeat_callback("any", device.on_press)
318
- elif args.device == "spacemouse":
319
- from robosuite.devices import SpaceMouse
320
-
321
- device = SpaceMouse(
322
- args.vendor_id,
323
- args.product_id,
324
- pos_sensitivity=args.pos_sensitivity,
325
- rot_sensitivity=args.rot_sensitivity,
326
- )
327
- else:
328
- raise Exception(
329
- "Invalid device choice: choose either 'keyboard' or 'spacemouse'."
330
- )
331
-
332
- # make a new timestamped directory
333
- t1, t2 = str(time.time()).split(".")
334
- new_dir = os.path.join(
335
- args.directory,
336
- f"{domain_name}_ln_{problem_name}_{t1}_{t2}_"
337
- + language_instruction.replace(" ", "_").strip('""'),
338
- )
339
-
340
- os.makedirs(new_dir)
341
-
342
- # collect demonstrations
343
-
344
- remove_directory = []
345
- i = 0
346
- while i < args.num_demonstration:
347
- print(i)
348
- saving = collect_human_trajectory(
349
- env, device, args.arm, args.config, problem_info, remove_directory
350
- )
351
- if saving:
352
- print(remove_directory)
353
- gather_demonstrations_as_hdf5(
354
- tmp_directory, new_dir, env_info, args, remove_directory
355
- )
356
- i += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/config_copy.py DELETED
@@ -1,20 +0,0 @@
1
- import os
2
- import shutil
3
- from libero.libero import get_libero_path
4
-
5
-
6
- def main():
7
- target_path = os.path.abspath(os.path.join("./", "configs"))
8
- print(f"Copying configs to {target_path}")
9
- if os.path.exists(target_path):
10
- response = input("The target directory already exists. Overwrite it? (y/n) ")
11
- if response.lower() != "y":
12
- return
13
- shutil.rmtree(target_path)
14
- shutil.copytree(
15
- os.path.join(get_libero_path("benchmark_root"), "../configs"), target_path
16
- )
17
-
18
-
19
- if __name__ == "__main__":
20
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/create_dataset.py DELETED
@@ -1,282 +0,0 @@
1
- import argparse
2
- import os
3
- from pathlib import Path
4
- import h5py
5
- import numpy as np
6
- import json
7
- import robosuite
8
- import robosuite.utils.transform_utils as T
9
- import robosuite.macros as macros
10
-
11
- import init_path
12
- import libero.libero.utils.utils as libero_utils
13
- import cv2
14
- from PIL import Image
15
- from robosuite.utils import camera_utils
16
-
17
- from libero.libero.envs import *
18
- from libero.libero import get_libero_path
19
-
20
def main():
    """Replay collected demonstrations and write a processed LIBERO dataset.

    Reads the raw demo hdf5 (``--demo-file``), replays each episode's actions
    in a freshly constructed environment, and records observations
    (proprioception, RGB images, optionally depth) into a new hdf5 file under
    the LIBERO datasets directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--demo-file", default="demo.hdf5")

    parser.add_argument(
        "--use-actions",
        action="store_true",
    )
    parser.add_argument("--use-camera-obs", action="store_true")
    parser.add_argument(
        "--dataset-path",
        type=str,
        default="datasets/",
    )

    parser.add_argument(
        "--dataset-name",
        type=str,
        default="training_set",
    )

    parser.add_argument("--no-proprio", action="store_true")

    parser.add_argument(
        "--use-depth",
        action="store_true",
    )

    args = parser.parse_args()

    f = h5py.File(args.demo_file, "r")
    env_name = f["data"].attrs["env"]
    env_kwargs = json.loads(f["data"].attrs["env_info"])

    problem_info = json.loads(f["data"].attrs["problem_info"])
    problem_name = problem_info["problem_name"]

    # list of all demonstrations episodes
    demos = list(f["data"].keys())

    bddl_file_name = f["data"].attrs["bddl_file_name"]
    bddl_file_dir = os.path.dirname(bddl_file_name)
    # NOTE: the original computed an unused "replace_bddl_prefix" here by
    # adding a list to a str, which raises TypeError at runtime; removed.

    hdf5_path = os.path.join(
        get_libero_path("datasets"),
        bddl_file_dir.split("bddl_files/")[-1].replace(".bddl", "_demo.hdf5"),
    )

    output_parent_dir = Path(hdf5_path).parent
    output_parent_dir.mkdir(parents=True, exist_ok=True)

    h5py_f = h5py.File(hdf5_path, "w")

    grp = h5py_f.create_group("data")

    grp.attrs["env_name"] = env_name
    grp.attrs["problem_info"] = f["data"].attrs["problem_info"]
    grp.attrs["macros_image_convention"] = macros.IMAGE_CONVENTION

    libero_utils.update_env_kwargs(
        env_kwargs,
        bddl_file_name=bddl_file_name,
        has_renderer=not args.use_camera_obs,
        has_offscreen_renderer=args.use_camera_obs,
        ignore_done=True,
        use_camera_obs=args.use_camera_obs,
        camera_depths=args.use_depth,
        camera_names=[
            "robot0_eye_in_hand",
            "agentview",
        ],
        reward_shaping=True,
        control_freq=20,
        camera_heights=128,
        camera_widths=128,
        camera_segmentations=None,
    )

    grp.attrs["bddl_file_name"] = bddl_file_name
    # Read via a context manager so the handle is closed (original leaked it).
    with open(bddl_file_name, "r") as bddl_f:
        grp.attrs["bddl_file_content"] = bddl_f.read()
    print(grp.attrs["bddl_file_content"])

    env = TASK_MAPPING[problem_name](
        **env_kwargs,
    )

    env_args = {
        "type": 1,
        "env_name": env_name,
        "problem_name": problem_name,
        "bddl_file": f["data"].attrs["bddl_file_name"],
        "env_kwargs": env_kwargs,
    }

    grp.attrs["env_args"] = json.dumps(env_args)
    print(grp.attrs["env_args"])
    total_len = 0

    # Skip the first few steps of each episode: the force sensor is not
    # stable right after reset.
    cap_index = 5

    for (i, ep) in enumerate(demos):
        print("Playing back random episode... (press ESC to quit)")

        # read the model xml, using the metadata stored in the attribute for this episode
        model_xml = f["data/{}".format(ep)].attrs["model_file"]
        reset_success = False
        while not reset_success:
            try:
                env.reset()
                reset_success = True
            except Exception:
                # Resets occasionally fail; retry until successful.
                continue

        model_xml = libero_utils.postprocess_model_xml(model_xml, {})

        if not args.use_camera_obs:
            env.viewer.set_camera(0)

        # load the flattened mujoco states
        states = f["data/{}/states".format(ep)][()]
        actions = np.array(f["data/{}/actions".format(ep)][()])

        num_actions = actions.shape[0]

        init_idx = 0
        env.reset_from_xml_string(model_xml)
        env.sim.reset()
        env.sim.set_state_from_flattened(states[init_idx])
        env.sim.forward()
        model_xml = env.sim.model.get_xml()

        ee_states = []
        gripper_states = []
        joint_states = []
        robot_states = []

        agentview_images = []
        eye_in_hand_images = []

        agentview_depths = []
        eye_in_hand_depths = []

        rewards = []
        dones = []

        valid_index = []

        for j, action in enumerate(actions):

            obs, reward, done, info = env.step(action)

            if j < num_actions - 1:
                # ensure that the actions deterministically lead to the same recorded states
                state_playback = env.sim.get_state().flatten()
                err = np.linalg.norm(states[j + 1] - state_playback)

                if err > 0.01:
                    print(
                        f"[warning] playback diverged by {err:.2f} for ep {ep} at step {j}"
                    )

            # Skip recording because the force sensor is not stable in
            # the beginning
            if j < cap_index:
                continue

            valid_index.append(j)

            if not args.no_proprio:
                if "robot0_gripper_qpos" in obs:
                    gripper_states.append(obs["robot0_gripper_qpos"])

                joint_states.append(obs["robot0_joint_pos"])

                ee_states.append(
                    np.hstack(
                        (
                            obs["robot0_eef_pos"],
                            T.quat2axisangle(obs["robot0_eef_quat"]),
                        )
                    )
                )

            robot_states.append(env.get_robot_state_vector(obs))

            if args.use_camera_obs:

                if args.use_depth:
                    agentview_depths.append(obs["agentview_depth"])
                    eye_in_hand_depths.append(obs["robot0_eye_in_hand_depth"])

                agentview_images.append(obs["agentview_image"])
                eye_in_hand_images.append(obs["robot0_eye_in_hand_image"])
            else:
                env.render()

        # end of one trajectory
        states = states[valid_index]
        actions = actions[valid_index]
        dones = np.zeros(len(actions)).astype(np.uint8)
        dones[-1] = 1
        rewards = np.zeros(len(actions)).astype(np.uint8)
        rewards[-1] = 1
        print(len(actions), len(agentview_images))
        # NOTE(review): this assert fails when --use-camera-obs is off, since
        # no images are collected; preserved as-is to avoid behavior change.
        assert len(actions) == len(agentview_images)
        print(len(actions))

        ep_data_grp = grp.create_group(f"demo_{i}")

        obs_grp = ep_data_grp.create_group("obs")
        if not args.no_proprio:
            obs_grp.create_dataset(
                "gripper_states", data=np.stack(gripper_states, axis=0)
            )
            obs_grp.create_dataset("joint_states", data=np.stack(joint_states, axis=0))
            obs_grp.create_dataset("ee_states", data=np.stack(ee_states, axis=0))
            obs_grp.create_dataset("ee_pos", data=np.stack(ee_states, axis=0)[:, :3])
            obs_grp.create_dataset("ee_ori", data=np.stack(ee_states, axis=0)[:, 3:])

        obs_grp.create_dataset("agentview_rgb", data=np.stack(agentview_images, axis=0))
        obs_grp.create_dataset(
            "eye_in_hand_rgb", data=np.stack(eye_in_hand_images, axis=0)
        )
        if args.use_depth:
            obs_grp.create_dataset(
                "agentview_depth", data=np.stack(agentview_depths, axis=0)
            )
            obs_grp.create_dataset(
                "eye_in_hand_depth", data=np.stack(eye_in_hand_depths, axis=0)
            )

        ep_data_grp.create_dataset("actions", data=actions)
        ep_data_grp.create_dataset("states", data=states)
        ep_data_grp.create_dataset("robot_states", data=np.stack(robot_states, axis=0))
        ep_data_grp.create_dataset("rewards", data=rewards)
        ep_data_grp.create_dataset("dones", data=dones)
        ep_data_grp.attrs["num_samples"] = len(agentview_images)
        ep_data_grp.attrs["model_file"] = model_xml
        ep_data_grp.attrs["init_state"] = states[init_idx]
        total_len += len(agentview_images)

    grp.attrs["num_demos"] = len(demos)
    grp.attrs["total"] = total_len
    env.close()

    h5py_f.close()
    f.close()

    print("The created dataset is saved in the following path: ")
    print(hdf5_path)


if __name__ == "__main__":
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/create_libero_task_example.py DELETED
@@ -1,107 +0,0 @@
1
- """This is a standalone file for create a task in libero."""
2
- import numpy as np
3
-
4
- from libero.libero.utils.bddl_generation_utils import (
5
- get_xy_region_kwargs_list_from_regions_info,
6
- )
7
- from libero.libero.utils.mu_utils import register_mu, InitialSceneTemplates
8
- from libero.libero.utils.task_generation_utils import (
9
- register_task_info,
10
- get_task_info,
11
- generate_bddl_from_task_info,
12
- )
13
-
14
-
15
- @register_mu(scene_type="kitchen")
16
- class KitchenScene1(InitialSceneTemplates):
17
- def __init__(self):
18
-
19
- fixture_num_info = {
20
- "kitchen_table": 1,
21
- "wooden_cabinet": 1,
22
- }
23
-
24
- object_num_info = {
25
- "akita_black_bowl": 1,
26
- "plate": 1,
27
- }
28
-
29
- super().__init__(
30
- workspace_name="kitchen_table",
31
- fixture_num_info=fixture_num_info,
32
- object_num_info=object_num_info,
33
- )
34
-
35
- def define_regions(self):
36
- self.regions.update(
37
- self.get_region_dict(
38
- region_centroid_xy=[0.0, -0.30],
39
- region_name="wooden_cabinet_init_region",
40
- target_name=self.workspace_name,
41
- region_half_len=0.01,
42
- yaw_rotation=(np.pi, np.pi),
43
- )
44
- )
45
-
46
- self.regions.update(
47
- self.get_region_dict(
48
- region_centroid_xy=[0.0, 0.0],
49
- region_name="akita_black_bowl_init_region",
50
- target_name=self.workspace_name,
51
- region_half_len=0.025,
52
- )
53
- )
54
-
55
- self.regions.update(
56
- self.get_region_dict(
57
- region_centroid_xy=[0.0, 0.25],
58
- region_name="plate_init_region",
59
- target_name=self.workspace_name,
60
- region_half_len=0.025,
61
- )
62
- )
63
- self.xy_region_kwargs_list = get_xy_region_kwargs_list_from_regions_info(
64
- self.regions
65
- )
66
-
67
- @property
68
- def init_states(self):
69
- states = [
70
- ("On", "akita_black_bowl_1", "kitchen_table_akita_black_bowl_init_region"),
71
- ("On", "plate_1", "kitchen_table_plate_init_region"),
72
- ("On", "wooden_cabinet_1", "kitchen_table_wooden_cabinet_init_region"),
73
- ]
74
- return states
75
-
76
-
77
def main():
    """Register two example tasks on kitchen_scene1 and generate their BDDL files."""
    # Each spec: (language instruction, region the bowl must end up in).
    task_specs = [
        ("Your Language 1", "wooden_cabinet_1_top_region"),
        ("Your Language 2", "wooden_cabinet_1_bottom_region"),
    ]
    for language, bowl_target_region in task_specs:
        register_task_info(
            language,
            scene_name="kitchen_scene1",
            objects_of_interest=["wooden_cabinet_1", "akita_black_bowl_1"],
            goal_states=[
                ("Open", "wooden_cabinet_1_top_region"),
                ("In", "akita_black_bowl_1", bowl_target_region),
            ],
        )
    bddl_file_names, failures = generate_bddl_from_task_info()
    print(bddl_file_names)


if __name__ == "__main__":
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/create_template.py DELETED
@@ -1,96 +0,0 @@
1
- """
2
- This is a script for creating various files from templates. This is to ease the process for users who want to extend LIBERO, creating new tasks. You would still need to make necessary changes based on the template to serve your own need, but the hope is that we save you much time by providing the necessary templates.
3
- """
4
-
5
- import os
6
- import xml.etree.ElementTree as ET
7
-
8
- from libero.libero import get_libero_path
9
- from libero.libero.envs.textures import get_texture_file_list
10
-
11
-
12
def create_problem_class_from_file(class_name):
    """Instantiate the problem-class template with *class_name* and write it out."""
    template_source_file = os.path.join(
        get_libero_path("benchmark_root"), "../../templates/problem_class_template.py"
    )
    with open(template_source_file, "r") as template_f:
        template_lines = template_f.readlines()
    # str.replace is a no-op on lines without the placeholder.
    new_lines = [line.replace("YOUR_CLASS_NAME", class_name) for line in template_lines]
    output_file = f"{class_name.lower()}.py"
    with open(output_file, "w") as out_f:
        out_f.writelines(new_lines)
    print(f"Creating class {class_name} at the file: {class_name.lower()}.py")
26
-
27
-
28
def create_scene_xml_file(scene_name):
    """Create a new scene xml from the template, letting the user pick textures.

    This is just an example for you to jump start. For more advanced editing,
    you will need to figure out yourself. You can take a look at all the
    available xml files for reference.

    Args:
        scene_name (str): basename (no extension) of the xml file to write.
    """
    template_source_file = os.path.join(
        get_libero_path("benchmark_root"), "../../templates/scene_template.xml"
    )
    parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True))
    tree = ET.parse(template_source_file, parser)
    root = tree.getroot()

    basic_elements = [
        ("Floor", "texplane"),
        ("Table", "tex-table"),
        ("Table legs", "tex-table-legs"),
        ("Walls", "tex-wall"),
    ]

    for (element_name, texture_name) in basic_elements:
        element = root.findall('.//texture[@name="{}"]'.format(texture_name))[0]
        # Renamed from `type` to avoid shadowing the builtin.
        element_type = None
        if "floor" in element_name.lower():
            element_type = "floor"
        elif "table" in element_name.lower():
            element_type = "table"
        elif "wall" in element_name.lower():
            element_type = "wall"
        # If you want to change the path of the texture file, you can pass in texture_path variable to change it.
        texture_list = get_texture_file_list(type=element_type, texture_path="../")
        # Use a distinct loop name: the original re-bound `texture_name` here.
        for i, (candidate_name, _candidate_path) in enumerate(texture_list):
            print(f"[{i}]: {candidate_name}")
        choice = int(input(f"Please select which texture to use for {element_name}: "))
        element.set("file", texture_list[choice][1])
    tree.write(f"{scene_name}.xml", encoding="utf-8")
    print(f"Creating scene {scene_name} at the file: {scene_name}.xml")
    # Typo fixed in the user-facing notice ("fiile" -> "file").
    print(
        "\n [Notice] The texture file paths are specified in the relative path format assuming your scene xml will be placed in the path libero/libero/assets/scenes/. "
    )
    return
65
-
66
-
67
def main():
    """Interactive entry point: ask which template to create, then dispatch."""
    choices = [
        "problem_class",
        "scene",
        "object",
        "arena",
    ]

    for idx, option in enumerate(choices):
        print(f"[{idx}]: {option}")
    choice = int(input("Please select which file to create: "))

    selected = choices[choice]
    if selected == "problem_class":
        # Ask user to specify the class name
        class_name = input("Please specify the class name: ")
        assert " " not in class_name, "space is not allowed in the naming"
        # Normalize to Snake_Case-style capitalization of each underscore part.
        parts = class_name.split("_")
        class_name = "_".join([part.lower().capitalize() for part in parts])
        create_problem_class_from_file(class_name)
    elif selected == "scene":
        # Ask user to specify the scene name
        scene_name = input("Please specify the scene name: ")
        scene_name = scene_name.lower()
        assert " " not in scene_name, "space is not allowed in the naming"
        create_scene_xml_file(scene_name)


if __name__ == "__main__":
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/get_affordance_info.py DELETED
@@ -1,9 +0,0 @@
1
# Example script: print all the affordance information specified in xml files.

import init_path
from libero.libero.envs.objects import OBJECTS_DICT
from libero.libero.utils.object_utils import get_affordance_regions

print(get_affordance_regions(OBJECTS_DICT))
 
 
 
 
 
 
 
 
 
 
scripts/get_dataset_info.py DELETED
@@ -1,156 +0,0 @@
1
- """
2
- Helper script to report dataset information. By default, will print trajectory length statistics,
3
- the maximum and minimum action element in the dataset, filter keys present, environment
4
- metadata, and the structure of the first demonstration. If --verbose is passed, it will
5
- report the exact demo keys under each filter key, and the structure of all demonstrations
6
- (not just the first one).
7
-
8
- Args:
9
- dataset (str): path to hdf5 dataset
10
-
11
- filter_key (str): if provided, report statistics on the subset of trajectories
12
- in the file that correspond to this filter key
13
-
14
- verbose (bool): if flag is provided, print more details, like the structure of all
15
- demonstrations (not just the first one)
16
-
17
- Example usage:
18
-
19
- # run script on example hdf5 packaged with repository
20
- python get_dataset_info.py --dataset ../../tests/assets/test.hdf5
21
-
22
- # run script only on validation data
23
- python get_dataset_info.py --dataset ../../tests/assets/test.hdf5 --filter_key valid
24
- """
25
- import h5py
26
- import json
27
- import argparse
28
- import numpy as np
29
-
30
- if __name__ == "__main__":
31
- parser = argparse.ArgumentParser()
32
- parser.add_argument(
33
- "--dataset",
34
- type=str,
35
- help="path to hdf5 dataset",
36
- )
37
- parser.add_argument(
38
- "--filter_key",
39
- type=str,
40
- default=None,
41
- help="(optional) if provided, report statistics on the subset of trajectories \
42
- in the file that correspond to this filter key",
43
- )
44
- parser.add_argument(
45
- "--verbose",
46
- action="store_true",
47
- help="verbose output",
48
- )
49
- args = parser.parse_args()
50
-
51
- # extract demonstration list from file
52
- filter_key = args.filter_key
53
- all_filter_keys = None
54
- f = h5py.File(args.dataset, "r")
55
- if filter_key is not None:
56
- # use the demonstrations from the filter key instead
57
- print("NOTE: using filter key {}".format(filter_key))
58
- demos = sorted(
59
- [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])]
60
- )
61
- else:
62
- # use all demonstrations
63
- demos = sorted(list(f["data"].keys()))
64
-
65
- # extract filter key information
66
- if "mask" in f:
67
- all_filter_keys = {}
68
- for fk in f["mask"]:
69
- fk_demos = sorted(
70
- [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(fk)])]
71
- )
72
- all_filter_keys[fk] = fk_demos
73
-
74
- # put demonstration list in increasing episode order
75
- inds = np.argsort([int(elem[5:]) for elem in demos])
76
- demos = [demos[i] for i in inds]
77
-
78
- # extract length of each trajectory in the file
79
- traj_lengths = []
80
- action_min = np.inf
81
- action_max = -np.inf
82
- for ep in demos:
83
- traj_lengths.append(f["data/{}/actions".format(ep)].shape[0])
84
- action_min = min(action_min, np.min(f["data/{}/actions".format(ep)][()]))
85
- action_max = max(action_max, np.max(f["data/{}/actions".format(ep)][()]))
86
- traj_lengths = np.array(traj_lengths)
87
-
88
- problem_info = json.loads(f["data"].attrs["problem_info"])
89
-
90
- language_instruction = "".join(problem_info["language_instruction"])
91
- # report statistics on the data
92
- print("")
93
- print("total transitions: {}".format(np.sum(traj_lengths)))
94
- print("total trajectories: {}".format(traj_lengths.shape[0]))
95
- print("traj length mean: {}".format(np.mean(traj_lengths)))
96
- print("traj length std: {}".format(np.std(traj_lengths)))
97
- print("traj length min: {}".format(np.min(traj_lengths)))
98
- print("traj length max: {}".format(np.max(traj_lengths)))
99
- print("action min: {}".format(action_min))
100
- print("action max: {}".format(action_max))
101
- print("language instruction: {}".format(language_instruction.strip('"')))
102
- print("")
103
- print("==== Filter Keys ====")
104
- if all_filter_keys is not None:
105
- for fk in all_filter_keys:
106
- print("filter key {} with {} demos".format(fk, len(all_filter_keys[fk])))
107
- else:
108
- print("no filter keys")
109
- print("")
110
- if args.verbose:
111
- if all_filter_keys is not None:
112
- print("==== Filter Key Contents ====")
113
- for fk in all_filter_keys:
114
- print(
115
- "filter_key {} with {} demos: {}".format(
116
- fk, len(all_filter_keys[fk]), all_filter_keys[fk]
117
- )
118
- )
119
- print("")
120
- env_meta = json.loads(f["data"].attrs["env_args"])
121
- print("==== Env Meta ====")
122
- print(json.dumps(env_meta, indent=4))
123
- print("")
124
-
125
- print("==== Dataset Structure ====")
126
- for ep in demos:
127
- print(
128
- "episode {} with {} transitions".format(
129
- ep, f["data/{}".format(ep)].attrs["num_samples"]
130
- )
131
- )
132
- for k in f["data/{}".format(ep)]:
133
- if k in ["obs", "next_obs"]:
134
- print(" key: {}".format(k))
135
- for obs_k in f["data/{}/{}".format(ep, k)]:
136
- shape = f["data/{}/{}/{}".format(ep, k, obs_k)].shape
137
- print(
138
- " observation key {} with shape {}".format(obs_k, shape)
139
- )
140
- elif isinstance(f["data/{}/{}".format(ep, k)], h5py.Dataset):
141
- key_shape = f["data/{}/{}".format(ep, k)].shape
142
- print(" key: {} with shape {}".format(k, key_shape))
143
-
144
- if not args.verbose:
145
- break
146
-
147
- f.close()
148
-
149
- # maybe display error message
150
- print("")
151
- if (action_min < -1.0) or (action_max > 1.0):
152
- raise Exception(
153
- "Dataset should have actions in [-1., 1.] but got bounds [{}, {}]".format(
154
- action_min, action_max
155
- )
156
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scripts/init_path.py DELETED
@@ -1,5 +0,0 @@
1
"""Prepend the repository root to ``sys.path`` so sibling scripts can import the package."""
import os
import sys

# Directory containing this file; its parent is the repository root.
path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(path, "../"))
 
 
 
 
 
 
scripts/libero_100_collect_demonstrations.py DELETED
@@ -1,372 +0,0 @@
1
- """
2
- Modified from robosuite example scripts.
3
- A script to collect a batch of human demonstrations that can be used
4
- to generate a learning curriculum (see `demo_learning_curriculum.py`).
5
-
6
- The demonstrations can be played back using the `playback_demonstrations_from_pkl.py`
7
- script.
8
-
9
- """
10
-
11
- import argparse
12
- import cv2
13
- import datetime
14
- import h5py
15
- import init_path
16
- import json
17
- import numpy as np
18
- import os
19
- import robosuite as suite
20
- import time
21
- from glob import glob
22
- from robosuite import load_controller_config
23
- from robosuite.wrappers import DataCollectionWrapper, VisualizationWrapper
24
- from robosuite.utils.input_utils import input2action
25
-
26
-
27
- import libero.libero.envs.bddl_utils as BDDLUtils
28
- from libero.libero.envs import *
29
- from termcolor import colored
30
-
31
-
32
def collect_human_trajectory(
    env, device, arm, env_configuration, problem_info, remove_directory=None
):
    """
    Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration.
    The rollout trajectory is saved to files in npz format.
    Modify the DataCollectionWrapper wrapper to add new fields or change data formats.

    Args:
        env (MujocoEnv): environment to control
        device (Device): to receive controls from the device
        arm (str): which arm to control (eg bimanual) 'right' or 'left'
        env_configuration (str): specified environment configuration
        problem_info (dict): problem metadata (not used in the body; kept for API parity)
        remove_directory (list): accumulator of episode directory names that should
            be discarded; aborted episodes are appended to it in place

    Returns:
        bool: True if the episode completed and should be saved, False if aborted
    """
    if remove_directory is None:
        # Fix: a mutable default ([]) would be shared across calls.
        remove_directory = []

    reset_success = False
    while not reset_success:
        try:
            env.reset()
            reset_success = True
        except Exception:
            # Fix: a bare `except:` also swallowed KeyboardInterrupt, making the
            # retry loop impossible to break out of with Ctrl-C.
            continue

    # ID = 2 always corresponds to agentview
    env.render()

    task_completion_hold_count = (
        -1
    )  # counter to collect 10 timesteps after reaching goal
    device.start_control()

    # Loop until we get a reset from the input or the task completes
    saving = True
    count = 0

    while True:
        count += 1
        # Set active robot
        active_robot = (
            env.robots[0]
            if env_configuration == "bimanual"
            else env.robots[arm == "left"]
        )

        # Get the newest action
        action, grasp = input2action(
            device=device,
            robot=active_robot,
            active_arm=arm,
            env_configuration=env_configuration,
        )

        # If action is none, then this is a reset so we should break
        if action is None:
            print("Break")
            saving = False
            break

        # Run environment step
        env.step(action)
        env.render()

        # Also break if we complete the task
        if task_completion_hold_count == 0:
            break

        # state machine to check for having a success for 10 consecutive timesteps
        if env._check_success():
            if task_completion_hold_count > 0:
                task_completion_hold_count -= 1  # latched state, decrement count
            else:
                task_completion_hold_count = 10  # reset count on first success timestep
        else:
            task_completion_hold_count = -1  # null the counter if there's no success

    print(count)
    # cleanup for end of data collection episodes: remember aborted episode dirs
    if not saving:
        remove_directory.append(env.ep_directory.split("/")[-1])
    env.close()
    return saving
112
-
113
-
114
def gather_demonstrations_as_hdf5(
    directory, out_dir, env_info, args, remove_directory=None
):
    """
    Gather the demonstrations saved in @directory into a single hdf5 file.

    The structure of the hdf5 file is as follows.

    data (group)
        date (attribute) - date of collection
        time (attribute) - time of collection
        repository_version (attribute) - repository version used during collection
        env (attribute) - environment name on which demos were collected

        demo1 (group) - every demonstration has a group
            model_file (attribute) - model xml string for demonstration
            states (dataset) - flattened mujoco states
            actions (dataset) - actions applied during demonstration

        demo2 (group)
        ...

    Args:
        directory (str): Path to the directory containing raw demonstrations.
        out_dir (str): Path to where to store the hdf5 file.
        env_info (str): JSON-encoded string containing environment information,
            including controller and robot info
        args (argparse.Namespace): parsed CLI arguments; ``args.bddl_file`` is
            recorded in the output attributes.
        remove_directory (list): episode directory names to skip (aborted demos).

    NOTE(review): this function reads the module-level global ``problem_info``
    set in the __main__ block — confirm it is defined before calling.
    """
    if remove_directory is None:
        # Fix: a mutable default ([]) would be shared across calls.
        remove_directory = []

    hdf5_path = os.path.join(out_dir, "demo.hdf5")
    # Fix: the original bound the hdf5 handle to `f` and then shadowed it with
    # `with open(xml_path) as f:` inside the loop, so the final f.close() closed
    # the (already-closed) xml file and the hdf5 file was never closed/flushed.
    # A context manager with a non-conflicting inner name resolves both issues.
    with h5py.File(hdf5_path, "w") as f:
        # store some metadata in the attributes of one group
        grp = f.create_group("data")

        num_eps = 0
        env_name = None  # will get populated at some point

        for ep_directory in os.listdir(directory):
            if ep_directory in remove_directory:
                # aborted episode: skip it entirely
                continue
            state_paths = os.path.join(directory, ep_directory, "state_*.npz")
            states = []
            actions = []

            for state_file in sorted(glob(state_paths)):
                dic = np.load(state_file, allow_pickle=True)
                env_name = str(dic["env"])

                states.extend(dic["states"])
                for ai in dic["action_infos"]:
                    actions.append(ai["actions"])

            if len(states) == 0:
                continue

            # Delete the last state. This is because when the DataCollector wrapper
            # recorded the states and actions, the states were recorded AFTER playing that action.
            del states[-1]
            assert len(states) == len(actions)

            num_eps += 1
            ep_data_grp = grp.create_group("demo_{}".format(num_eps))

            # store model xml as an attribute
            xml_path = os.path.join(directory, ep_directory, "model.xml")
            with open(xml_path, "r") as xml_file:
                xml_str = xml_file.read()
            ep_data_grp.attrs["model_file"] = xml_str

            # write datasets for states and actions
            ep_data_grp.create_dataset("states", data=np.array(states))
            ep_data_grp.create_dataset("actions", data=np.array(actions))

        # write dataset attributes (metadata)
        now = datetime.datetime.now()
        grp.attrs["date"] = "{}-{}-{}".format(now.month, now.day, now.year)
        grp.attrs["time"] = "{}:{}:{}".format(now.hour, now.minute, now.second)
        grp.attrs["repository_version"] = suite.__version__
        grp.attrs["env"] = env_name
        grp.attrs["env_info"] = env_info

        grp.attrs["problem_info"] = json.dumps(problem_info)
        grp.attrs["bddl_file_name"] = args.bddl_file
        # Fix: str(open(...)) stored the repr of the file object (e.g.
        # "<_io.TextIOWrapper ...>") instead of the file's contents, and leaked
        # the handle. Read the actual content and close the file.
        with open(args.bddl_file, "r", encoding="utf-8") as bddl_file:
            grp.attrs["bddl_file_content"] = bddl_file.read()
204
-
205
-
206
- if __name__ == "__main__":
207
- # Arguments
208
- parser = argparse.ArgumentParser()
209
- parser.add_argument(
210
- "--directory",
211
- type=str,
212
- default="demonstration_data",
213
- )
214
- parser.add_argument(
215
- "--robots",
216
- nargs="+",
217
- type=list,
218
- default=["Panda"],
219
- help="Which robot(s) to use in the env",
220
- )
221
- parser.add_argument(
222
- "--config",
223
- type=str,
224
- default="single-arm-opposed",
225
- help="Specified environment configuration if necessary",
226
- )
227
- parser.add_argument(
228
- "--arm",
229
- type=str,
230
- default="right",
231
- help="Which arm to control (eg bimanual) 'right' or 'left'",
232
- )
233
- parser.add_argument(
234
- "--camera",
235
- type=str,
236
- default="agentview",
237
- help="Which camera to use for collecting demos",
238
- )
239
- parser.add_argument(
240
- "--controller",
241
- type=str,
242
- default="OSC_POSE",
243
- help="Choice of controller. Can be 'IK_POSE' or 'OSC_POSE'",
244
- )
245
- parser.add_argument("--device", type=str, default="spacemouse")
246
- parser.add_argument(
247
- "--pos-sensitivity",
248
- type=float,
249
- default=1.5,
250
- help="How much to scale position user inputs",
251
- )
252
- parser.add_argument(
253
- "--rot-sensitivity",
254
- type=float,
255
- default=1.5,
256
- help="How much to scale rotation user inputs",
257
- )
258
- parser.add_argument(
259
- "--num-demonstration",
260
- type=int,
261
- default=50,
262
- help="How much to scale rotation user inputs",
263
- )
264
- parser.add_argument("--bddl-file", type=str, default=None)
265
- parser.add_argument("--task-id", type=int)
266
-
267
- parser.add_argument("--vendor-id", type=int, default=9583)
268
- parser.add_argument("--product-id", type=int, default=50734)
269
-
270
- args = parser.parse_args()
271
-
272
- # Get controller config
273
- controller_config = load_controller_config(default_controller=args.controller)
274
-
275
- # Create argument configuration
276
- config = {
277
- "robots": args.robots,
278
- "controller_configs": controller_config,
279
- }
280
-
281
- assert os.path.exists(args.bddl_file)
282
- problem_info = BDDLUtils.get_problem_info(args.bddl_file)
283
- # Check if we're using a multi-armed environment and use env_configuration argument if so
284
-
285
- # Create environment
286
- problem_name = problem_info["problem_name"]
287
- domain_name = problem_info["domain_name"]
288
- language_instruction = problem_info["language_instruction"]
289
- text = colored(language_instruction, "red", attrs=["bold"])
290
- print("Goal of the following task: ", text)
291
- instruction = colored("Hit any key to proceed to data collection ...", "green", attrs=["reverse", "blink"])
292
- print(instruction)
293
- input()
294
-
295
- if "TwoArm" in problem_name:
296
- config["env_configuration"] = args.config
297
- print(language_instruction)
298
- env = TASK_MAPPING[problem_name](
299
- bddl_file_name=args.bddl_file,
300
- **config,
301
- has_renderer=True,
302
- has_offscreen_renderer=False,
303
- render_camera=args.camera,
304
- ignore_done=True,
305
- use_camera_obs=False,
306
- reward_shaping=True,
307
- control_freq=20,
308
- )
309
-
310
- # Wrap this with visualization wrapper
311
- env = VisualizationWrapper(env)
312
-
313
- # Grab reference to controller config and convert it to json-encoded string
314
- env_info = json.dumps(config)
315
-
316
- # wrap the environment with data collection wrapper
317
- tmp_directory = "demonstration_data/tmp/{}_ln_{}/{}".format(
318
- problem_name,
319
- language_instruction.replace(" ", "_").strip('""'),
320
- str(time.time()).replace(".", "_"),
321
- )
322
-
323
- env = DataCollectionWrapper(env, tmp_directory)
324
-
325
- # initialize device
326
- if args.device == "keyboard":
327
- from robosuite.devices import Keyboard
328
-
329
- device = Keyboard(
330
- pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity
331
- )
332
- env.viewer.add_keypress_callback("any", device.on_press)
333
- env.viewer.add_keyup_callback("any", device.on_release)
334
- env.viewer.add_keyrepeat_callback("any", device.on_press)
335
- elif args.device == "spacemouse":
336
- from robosuite.devices import SpaceMouse
337
-
338
- device = SpaceMouse(
339
- args.vendor_id,
340
- args.product_id,
341
- pos_sensitivity=args.pos_sensitivity,
342
- rot_sensitivity=args.rot_sensitivity,
343
- )
344
- else:
345
- raise Exception(
346
- "Invalid device choice: choose either 'keyboard' or 'spacemouse'."
347
- )
348
-
349
- # make a new timestamped directory
350
- t1, t2 = str(time.time()).split(".")
351
- new_dir = os.path.join(
352
- args.directory,
353
- f"{domain_name}_ln_{problem_name}_{t1}_{t2}_"
354
- + language_instruction.replace(" ", "_").strip('""'),
355
- )
356
- os.makedirs(new_dir)
357
-
358
- # collect demonstrations
359
-
360
- remove_directory = []
361
- i = 0
362
- while i < args.num_demonstration:
363
- print(i)
364
- saving = collect_human_trajectory(
365
- env, device, args.arm, args.config, problem_info, remove_directory
366
- )
367
- if saving:
368
- print(remove_directory)
369
- gather_demonstrations_as_hdf5(
370
- tmp_directory, new_dir, env_info, args, remove_directory
371
- )
372
- i += 1