THURCSCT committed on
Commit 3d375d1 · verified · 1 Parent(s): 3310b35

Delete folder benchmark_scripts with huggingface_hub

benchmark_scripts/check_task_suites.py DELETED
@@ -1,117 +0,0 @@
- """
- This script tests whether users can successfully load all the environments and the benchmark initial states on their machines.
- """
- import os
- from termcolor import colored
- import cv2
- import h5py
- import subprocess
- import shutil
- import numpy as np
-
- from pathlib import Path
-
- # import init_path
- from libero.libero import benchmark, get_libero_path
-
-
- # def render_task(task, bddl_file, init_states, demo_file):
- #     env_args = {
- #         "bddl_file_name": bddl_file,
- #         "camera_heights": 128,
- #         "camera_widths": 128
- #     }
-
- #     env = OffScreenRenderEnv(**env_args)
- #     env.reset()
- #     obs = env.set_init_state(init_states[0])
- #     for _ in range(5):
- #         obs, _, _, _ = env.step([0.] * 7)
- #     images = [obs["agentview_image"]]
-
- #     with h5py.File(demo_file, "r") as f:
- #         states = f["data/demo_0/states"][()]
- #         obs = env.set_init_state(states[-1])
-
- #     images.append(obs["agentview_image"])
- #     images = np.concatenate(images, axis=1)
- #     cv2.imwrite(f"benchmark_tasks/{task.problem}-{task.language}.png", images[::-1, :, ::-1])
- #     env.close()
-
-
- def main():
-
-     benchmark_root_path = get_libero_path("benchmark_root")
-     init_states_default_path = get_libero_path("init_states")
-     datasets_default_path = get_libero_path("datasets")
-     bddl_files_default_path = get_libero_path("bddl_files")
-
-     # Check that all the required files exist
-     task_tuples = []
-     demo_files = []
-     for benchmark_name in [
-         "libero_object",
-         "libero_goal",
-         "libero_spatial",
-         "libero_10",
-         "libero_90",
-     ]:
-         benchmark_instance = benchmark.get_benchmark_dict()[benchmark_name]()
-         num_tasks = benchmark_instance.get_num_tasks()
-         # see how many tasks are involved in the benchmark
-         print(f"{num_tasks} tasks in the benchmark {benchmark_instance.name}: ")
-
-         # Check all the task names and their bddl files
-         task_names = benchmark_instance.get_task_names()
-         print("The benchmark contains the following tasks:")
-         for task_id in range(num_tasks):
-             task_name = task_names[task_id]
-             task = benchmark_instance.get_task(task_id)
-             bddl_file = os.path.join(
-                 bddl_files_default_path, task.problem_folder, task.bddl_file
-             )
-             assert os.path.exists(bddl_file), f"{bddl_file} does not exist!"
-             init_states_path = os.path.join(
-                 init_states_default_path, task.problem_folder, task.init_states_file
-             )
-             assert os.path.exists(
-                 init_states_path
-             ), f"{init_states_path} does not exist!"
-             demo_file = os.path.join(
-                 datasets_default_path,
-                 benchmark_instance.get_task_demonstration(task_id),
-             )
-             assert os.path.exists(demo_file), f"{demo_file} does not exist!"
-             init_states = benchmark_instance.get_task_init_states(task_id)
-             task_tuples.append((benchmark_name, task_id, bddl_file, demo_file))
-             demo_files.append(demo_file)
-
-     print(colored("All the files exist!", "green"))
-     processes = []
-     if os.path.exists("benchmark_tasks"):
-         shutil.rmtree("benchmark_tasks")
-
-     for i in range(len(task_tuples)):
-         command = f"python benchmark_scripts/render_single_task.py --benchmark_name {task_tuples[i][0]} --task_id {task_tuples[i][1]} --bddl_file {task_tuples[i][2]} --demo_file {task_tuples[i][3]}"
-         p = subprocess.Popen(command, shell=True)
-         processes.append(p)
-         if i % 10 == 9:
-             for p in processes:
-                 p.wait()
-             processes = []
-
-     count = len(list(Path("benchmark_tasks").glob("*.png")))
-     print(f"Expected 130 tasks, rendered {count} tasks successfully.")
-     if count < 130:
-         print(colored("Some tasks failed to render!", "red"))
-         for demo_file in demo_files:
-             if not os.path.exists(
-                 os.path.join(
-                     "benchmark_tasks", demo_file.split("/")[-1].replace(".hdf5", ".png")
-                 )
-             ):
-                 print(demo_file)
-
-
- if __name__ == "__main__":
-     main()
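For reference, the deleted checker assumed a LIBERO installation exposing `benchmark` and `get_libero_path` as used above. A minimal sketch of the same file check for a single suite, under that assumption (the suite name `libero_object` is just an example):

    # Minimal sketch: verify that every task's BDDL file exists for one suite.
    import os
    from libero.libero import benchmark, get_libero_path

    bddl_root = get_libero_path("bddl_files")
    suite = benchmark.get_benchmark_dict()["libero_object"]()
    for task_id in range(suite.get_num_tasks()):
        task = suite.get_task(task_id)
        bddl_file = os.path.join(bddl_root, task.problem_folder, task.bddl_file)
        assert os.path.exists(bddl_file), f"{bddl_file} does not exist!"
    print("All BDDL files found for libero_object.")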
benchmark_scripts/download_libero_datasets.py DELETED
@@ -1,46 +0,0 @@
- import init_path
- import argparse
- import os
-
- import libero.libero.utils.download_utils as download_utils
- from libero.libero import get_libero_path
-
-
- def parse_args():
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "--download-dir",
-         type=str,
-         default=get_libero_path("datasets"),
-     )
-     parser.add_argument(
-         "--datasets",
-         type=str,
-         choices=["all", "libero_goal", "libero_spatial", "libero_object", "libero_100"],
-         default="all",
-     )
-     return parser.parse_args()
-
-
- def main():
-
-     args = parse_args()
-
-     # Let users specify the download directory of the datasets
-     os.makedirs(args.download_dir, exist_ok=True)
-     print(f"Datasets will be downloaded to {args.download_dir}")
-     print(f"Downloading {args.datasets} datasets")
-
-     # Download the requested datasets
-     download_utils.libero_dataset_download(
-         download_dir=args.download_dir, datasets=args.datasets
-     )
-
-     # (TODO) If datasets exist, check if datasets are the same as benchmark
-
-     # Check that the downloaded datasets are complete
-     download_utils.check_libero_dataset(download_dir=args.download_dir)
-
-
- if __name__ == "__main__":
-     main()
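As the argparse flags in the deleted script imply, a typical invocation would have looked like the following (the paths and suite choice are illustrative):

    python benchmark_scripts/download_libero_datasets.py --datasets libero_goal --download-dir ./datasets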
benchmark_scripts/init_path.py DELETED
@@ -1,8 +0,0 @@
- import sys
- import os
-
- path = os.path.dirname(os.path.realpath(__file__))
- sys.path.insert(0, os.path.join(path, "../"))
-
- # import robosuite.utils.macros as macros
- # macros.IMAGE_CONVENTION = "opencv"
benchmark_scripts/render_single_task.py DELETED
@@ -1,82 +0,0 @@
- import os
- from termcolor import colored
- import cv2
- import h5py
- import argparse
- import numpy as np
-
- from libero.libero.envs import OffScreenRenderEnv
- from libero.libero import benchmark, get_libero_path
-
-
- def render_task(task, bddl_file, init_states, demo_file):
-     env_args = {
-         "bddl_file_name": bddl_file,
-         "camera_heights": 128,
-         "camera_widths": 128,
-     }
-
-     env = OffScreenRenderEnv(**env_args)
-     env.reset()
-     obs = env.set_init_state(init_states[0])
-     for _ in range(5):
-         obs, _, _, _ = env.step([0.0] * 7)
-     images = [obs["agentview_image"]]
-
-     with h5py.File(demo_file, "r") as f:
-         states = f["data/demo_0/states"][()]
-         obs = env.set_init_state(states[-1])
-
-     images.append(obs["agentview_image"])
-     images = np.concatenate(images, axis=1)
-     cv2.imwrite(
-         f"benchmark_tasks/{task.problem}-{task.language}.png", images[::-1, :, ::-1]
-     )
-     env.close()
-
-
- def main():
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--benchmark_name", type=str)
-     parser.add_argument("--task_id", type=int, default=0)
-     parser.add_argument("--bddl_file", type=str)
-     parser.add_argument("--demo_file", type=str)
-     args = parser.parse_args()
-
-     benchmark_name = args.benchmark_name
-     task_id = args.task_id
-     bddl_file = args.bddl_file
-     demo_file = args.demo_file
-
-     benchmark_instance = benchmark.get_benchmark_dict()[benchmark_name]()
-     env_args = {
-         "bddl_file_name": bddl_file,
-         "camera_heights": 128,
-         "camera_widths": 128,
-     }
-
-     os.makedirs("benchmark_tasks", exist_ok=True)
-
-     task = benchmark_instance.get_task(task_id)
-     init_states = benchmark_instance.get_task_init_states(task_id)
-
-     env = OffScreenRenderEnv(**env_args)
-     env.reset()
-     obs = env.set_init_state(init_states[0])
-     for _ in range(5):
-         obs, _, _, _ = env.step([0.0] * 7)
-     images = [obs["agentview_image"]]
-
-     with h5py.File(demo_file, "r") as f:
-         states = f["data/demo_0/states"][()]
-         obs = env.set_init_state(states[-1])
-
-     images.append(obs["agentview_image"])
-     images = np.concatenate(images, axis=1)
-     image_name = demo_file.split("/")[-1].replace(".hdf5", ".png")
-     cv2.imwrite(f"benchmark_tasks/{image_name}", images[::-1, :, ::-1])
-     env.close()
-
-
- if __name__ == "__main__":
-     main()
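The command string built in check_task_suites.py above shows how this script was dispatched; an equivalent manual invocation (the file paths are placeholders):

    python benchmark_scripts/render_single_task.py --benchmark_name libero_object --task_id 0 \
        --bddl_file <path/to/task.bddl> --demo_file <path/to/demo.hdf5>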
benchmark_scripts/shasum_files.py DELETED
@@ -1,49 +0,0 @@
- import os
- import hashlib
- from pathlib import Path
-
-
- def shasum_file(file_path):
-     if os.path.exists(file_path):
-         with open(file_path, "rb") as f:
-             data = f.read()
-         sha_hash = hashlib.sha1(data).hexdigest()
-         file_name = file_path.split("/")[-1]
-         return {file_name: sha_hash}
-     else:
-         return None
-
-
- def shasum_datasets(download_dir="datasets"):
-     dataset_shasum = {}
-     for dataset_name in [
-         "libero_object",
-         "libero_goal",
-         "libero_spatial",
-         "libero_10",
-         "libero_90",
-     ]:
-         dataset_dir = os.path.join(download_dir, dataset_name)
-         if os.path.exists(dataset_dir):
-             count = 0
-             for path in Path(dataset_dir).glob("*.hdf5"):
-                 count += 1
-             if not (
-                 (count == 10 and dataset_name != "libero_90")
-                 or (count == 90 and dataset_name == "libero_90")
-             ):
-                 print("file count doesn't match")
-         else:
-             print("dataset not found")
-         for path in Path(dataset_dir).glob("*.hdf5"):
-             dataset_shasum.update(shasum_file(str(path)))
-     print(dataset_shasum)
-
-
- # def shasum_pretrained_models():
-
-
- # def shasum_pretrained_policies():
-
-
- shasum_datasets()
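The script above only prints the SHA-1 digests. A minimal sketch of checking a downloaded file against a recorded digest (the expected value is a placeholder, and the chunked read is an assumption to keep memory use low on large HDF5 demo files):

    import hashlib

    def verify_sha1(file_path, expected_hex):
        # Stream in 1 MiB chunks so large demo files are not read into memory at once.
        h = hashlib.sha1()
        with open(file_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_hex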