RukawaY committed on
Commit
7040a76
·
verified ·
1 Parent(s): 1583834

Upload folder using huggingface_hub

Browse files
configs/ddppo_imagenav_gs_eval.yaml ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # DDPPO ImageNav evaluation config for Gaussian Splatting scenes.
4
+ #
5
+ # Usage:
6
+ # bash scripts_gs/eval_imagenav.sh --ckpt /path/to/checkpoint.pth
7
+
8
+ defaults:
9
+ - /habitat: habitat_config_base
10
+ - /habitat/task: imagenav
11
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
12
+ - /habitat_baselines: habitat_baselines_rl_config_base
13
+ - _self_
14
+
15
+ # ---------- Dataset ----------
16
+ habitat:
17
+ dataset:
18
+ type: PointNav-v1
19
+ split: val
20
+ data_path: data/scene_datasets/gs_scenes/episodes/imagenav/{split}/{split}.json.gz
21
+ scenes_dir: data/scene_datasets
22
+
23
+ # ---------- Environment ----------
24
+ environment:
25
+ max_episode_steps: 1000
26
+
27
+ # ---------- Task ----------
28
+ task:
29
+ success_reward: 2.5
30
+ slack_reward: -1e-3
31
+
32
+ # ---------- Simulator ----------
33
+ simulator:
34
+ scene_dataset: data/scene_datasets/gs_scenes/val.scene_dataset_config.json
35
+ default_agent_navmesh: False
36
+ agents:
37
+ main_agent:
38
+ sim_sensors:
39
+ rgb_sensor:
40
+ width: 256
41
+ height: 256
42
+ depth_sensor:
43
+ width: 256
44
+ height: 256
45
+
46
+ # ---------- Baselines ----------
47
+ habitat_baselines:
48
+ verbose: False
49
+ evaluate: True
50
+ trainer_name: "ddppo"
51
+ torch_gpu_id: 0
52
+ tensorboard_dir: "tb"
53
+ video_dir: "video_dir"
54
+ test_episode_count: -1
55
+ num_environments: 1
56
+ force_torch_single_threaded: True
57
+ eval_ckpt_path_dir: ""
58
+ load_resume_state_config: False
59
+
60
+ eval:
61
+ video_option: []
62
+ use_ckpt_config: False
63
+
64
+ rl:
65
+ policy:
66
+ main_agent:
67
+ name: "PointNavResNetPolicy"
68
+
69
+ ppo:
70
+ clip_param: 0.2
71
+ ppo_epoch: 2
72
+ num_mini_batch: 2
73
+ value_loss_coef: 0.5
74
+ entropy_coef: 0.01
75
+ lr: 2.5e-4
76
+ eps: 1e-5
77
+ max_grad_norm: 0.2
78
+ num_steps: 64
79
+ use_gae: True
80
+ gamma: 0.99
81
+ tau: 0.95
82
+ use_linear_clip_decay: False
83
+ use_linear_lr_decay: False
84
+ reward_window_size: 50
85
+ use_normalized_advantage: False
86
+ hidden_size: 512
87
+ use_double_buffered_sampler: False
88
+
89
+ ddppo:
90
+ sync_frac: 0.6
91
+ distrib_backend: NCCL
92
+ pretrained_weights: ""
93
+ pretrained: False
94
+ pretrained_encoder: False
95
+ train_encoder: True
96
+ reset_critic: True
97
+ backbone: resnet50
98
+ rnn_type: LSTM
99
+ num_recurrent_layers: 2
configs/ddppo_imagenav_gs_train.yaml ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # DDPPO ImageNav training config for Gaussian Splatting scenes.
4
+ # Uses ImageGoalSensor which renders goal images on-the-fly from the goal position.
5
+ #
6
+ # Usage:
7
+ # python scripts_gs/run_imagenav.py
8
+ # bash scripts_gs/train_imagenav.sh --output /path/to/output
9
+
10
+ defaults:
11
+ - /habitat: habitat_config_base
12
+ - /habitat/task: imagenav
13
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
14
+ - /habitat_baselines: habitat_baselines_rl_config_base
15
+ - _self_
16
+
17
+ # ---------- Dataset ----------
18
+ habitat:
19
+ dataset:
20
+ type: PointNav-v1
21
+ split: train
22
+ data_path: data/scene_datasets/gs_scenes/episodes/imagenav/{split}/{split}.json.gz
23
+ scenes_dir: data/scene_datasets
24
+
25
+ # ---------- Environment ----------
26
+ environment:
27
+ max_episode_steps: 1000
28
+
29
+ # ---------- Task ----------
30
+ task:
31
+ success_reward: 2.5
32
+ slack_reward: -1e-3
33
+
34
+ # ---------- Simulator ----------
35
+ simulator:
36
+ scene_dataset: data/scene_datasets/gs_scenes/train.scene_dataset_config.json
37
+ default_agent_navmesh: False
38
+ agents:
39
+ main_agent:
40
+ sim_sensors:
41
+ rgb_sensor:
42
+ width: 256
43
+ height: 256
44
+ depth_sensor:
45
+ width: 256
46
+ height: 256
47
+
48
+ # ---------- Baselines ----------
49
+ habitat_baselines:
50
+ verbose: False
51
+ trainer_name: "ddppo"
52
+ torch_gpu_id: 0
53
+ tensorboard_dir: "tb"
54
+ video_dir: "video_dir"
55
+ test_episode_count: -1
56
+ eval_ckpt_path_dir: "data/new_checkpoints"
57
+ num_environments: 4
58
+ checkpoint_folder: "data/new_checkpoints"
59
+ num_updates: -1
60
+ total_num_steps: 2.5e9
61
+ log_interval: 10
62
+ num_checkpoints: 100
63
+ force_torch_single_threaded: True
64
+
65
+ rl:
66
+ policy:
67
+ main_agent:
68
+ name: "PointNavResNetPolicy"
69
+
70
+ ppo:
71
+ clip_param: 0.2
72
+ ppo_epoch: 2
73
+ num_mini_batch: 2
74
+ value_loss_coef: 0.5
75
+ entropy_coef: 0.01
76
+ lr: 2.5e-4
77
+ eps: 1e-5
78
+ max_grad_norm: 0.2
79
+ num_steps: 64
80
+ use_gae: True
81
+ gamma: 0.99
82
+ tau: 0.95
83
+ use_linear_clip_decay: False
84
+ use_linear_lr_decay: False
85
+ reward_window_size: 50
86
+ use_normalized_advantage: False
87
+ hidden_size: 512
88
+ use_double_buffered_sampler: False
89
+
90
+ ddppo:
91
+ sync_frac: 0.6
92
+ distrib_backend: NCCL
93
+ pretrained_weights: ""
94
+ pretrained: False
95
+ pretrained_encoder: False
96
+ train_encoder: True
97
+ reset_critic: True
98
+ backbone: resnet50
99
+ rnn_type: LSTM
100
+ num_recurrent_layers: 2
configs/ddppo_objectnav_gs_eval.yaml ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # DDPPO ObjectNav evaluation config for Gaussian Splatting scenes.
4
+ #
5
+ # Usage:
6
+ # bash scripts_gs/eval_objectnav.sh --ckpt /path/to/checkpoint.pth
7
+
8
+ defaults:
9
+ - /habitat: habitat_config_base
10
+ - /habitat/task: objectnav
11
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
12
+ - /habitat_baselines: habitat_baselines_rl_config_base
13
+ - _self_
14
+
15
+ # ---------- Dataset ----------
16
+ habitat:
17
+ dataset:
18
+ type: ObjectNav-v1
19
+ split: val
20
+ data_path: data/scene_datasets/gs_scenes/episodes/objectnav/{split}/{split}.json.gz
21
+ scenes_dir: data/scene_datasets
22
+
23
+ # ---------- Environment ----------
24
+ environment:
25
+ max_episode_steps: 500
26
+
27
+ # ---------- Task ----------
28
+ task:
29
+ success_reward: 2.5
30
+ slack_reward: -1e-3
31
+
32
+ # ---------- Simulator ----------
33
+ simulator:
34
+ scene_dataset: data/scene_datasets/gs_scenes/val.scene_dataset_config.json
35
+ default_agent_navmesh: False
36
+ agents:
37
+ main_agent:
38
+ sim_sensors:
39
+ rgb_sensor:
40
+ width: 256
41
+ height: 256
42
+ depth_sensor:
43
+ width: 256
44
+ height: 256
45
+
46
+ # ---------- Baselines ----------
47
+ habitat_baselines:
48
+ verbose: False
49
+ evaluate: True
50
+ trainer_name: "ddppo"
51
+ torch_gpu_id: 0
52
+ tensorboard_dir: "tb"
53
+ video_dir: "video_dir"
54
+ test_episode_count: -1
55
+ num_environments: 1
56
+ force_torch_single_threaded: True
57
+ eval_ckpt_path_dir: ""
58
+ load_resume_state_config: False
59
+
60
+ eval:
61
+ video_option: []
62
+ use_ckpt_config: False
63
+
64
+ rl:
65
+ policy:
66
+ main_agent:
67
+ name: "PointNavResNetPolicy"
68
+
69
+ ppo:
70
+ clip_param: 0.2
71
+ ppo_epoch: 4
72
+ num_mini_batch: 2
73
+ value_loss_coef: 0.5
74
+ entropy_coef: 0.01
75
+ lr: 2.5e-4
76
+ eps: 1e-5
77
+ max_grad_norm: 0.2
78
+ num_steps: 64
79
+ use_gae: True
80
+ gamma: 0.99
81
+ tau: 0.95
82
+ use_linear_clip_decay: False
83
+ use_linear_lr_decay: False
84
+ reward_window_size: 50
85
+ use_normalized_advantage: False
86
+ hidden_size: 512
87
+ use_double_buffered_sampler: False
88
+
89
+ ddppo:
90
+ sync_frac: 0.6
91
+ distrib_backend: NCCL
92
+ pretrained_weights: ""
93
+ pretrained: False
94
+ pretrained_encoder: False
95
+ train_encoder: True
96
+ reset_critic: True
97
+ backbone: resnet50
98
+ rnn_type: LSTM
99
+ num_recurrent_layers: 2
configs/ddppo_objectnav_gs_train.yaml ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # DDPPO ObjectNav training config for Gaussian Splatting scenes.
4
+ # Uses SAM+CLIP detected objects with ObjectGoalSensor (category ID lookup).
5
+ #
6
+ # Usage:
7
+ # python scripts_gs/run_objectnav.py
8
+ # bash scripts_gs/train_objectnav.sh --output /path/to/output
9
+
10
+ defaults:
11
+ - /habitat: habitat_config_base
12
+ - /habitat/task: objectnav
13
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
14
+ - /habitat_baselines: habitat_baselines_rl_config_base
15
+ - _self_
16
+
17
+ # ---------- Dataset ----------
18
+ habitat:
19
+ dataset:
20
+ type: ObjectNav-v1
21
+ split: train
22
+ data_path: data/scene_datasets/gs_scenes/episodes/objectnav/{split}/{split}.json.gz
23
+ scenes_dir: data/scene_datasets
24
+
25
+ # ---------- Environment ----------
26
+ environment:
27
+ max_episode_steps: 500
28
+
29
+ # ---------- Task ----------
30
+ task:
31
+ success_reward: 2.5
32
+ slack_reward: -1e-3
33
+
34
+ # ---------- Simulator ----------
35
+ simulator:
36
+ scene_dataset: data/scene_datasets/gs_scenes/train.scene_dataset_config.json
37
+ default_agent_navmesh: False
38
+ agents:
39
+ main_agent:
40
+ sim_sensors:
41
+ rgb_sensor:
42
+ width: 256
43
+ height: 256
44
+ depth_sensor:
45
+ width: 256
46
+ height: 256
47
+
48
+ # ---------- Baselines ----------
49
+ habitat_baselines:
50
+ verbose: False
51
+ trainer_name: "ddppo"
52
+ torch_gpu_id: 0
53
+ tensorboard_dir: "tb"
54
+ video_dir: "video_dir"
55
+ test_episode_count: -1
56
+ eval_ckpt_path_dir: "data/new_checkpoints"
57
+ num_environments: 4
58
+ checkpoint_folder: "data/new_checkpoints"
59
+ num_updates: -1
60
+ total_num_steps: 2.5e9
61
+ log_interval: 10
62
+ num_checkpoints: 100
63
+ force_torch_single_threaded: True
64
+
65
+ rl:
66
+ policy:
67
+ main_agent:
68
+ name: "PointNavResNetPolicy"
69
+
70
+ ppo:
71
+ clip_param: 0.2
72
+ ppo_epoch: 4
73
+ num_mini_batch: 2
74
+ value_loss_coef: 0.5
75
+ entropy_coef: 0.01
76
+ lr: 2.5e-4
77
+ eps: 1e-5
78
+ max_grad_norm: 0.2
79
+ num_steps: 64
80
+ use_gae: True
81
+ gamma: 0.99
82
+ tau: 0.95
83
+ use_linear_clip_decay: False
84
+ use_linear_lr_decay: False
85
+ reward_window_size: 50
86
+ use_normalized_advantage: False
87
+ hidden_size: 512
88
+ use_double_buffered_sampler: False
89
+
90
+ ddppo:
91
+ sync_frac: 0.6
92
+ distrib_backend: NCCL
93
+ pretrained_weights: ""
94
+ pretrained: False
95
+ pretrained_encoder: False
96
+ train_encoder: True
97
+ reset_critic: True
98
+ backbone: resnet50
99
+ rnn_type: LSTM
100
+ num_recurrent_layers: 2
configs/ddppo_pointnav_gs_eval.yaml ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # DDPPO PointNav evaluation config for Gaussian Splatting scenes.
4
+ # Self-contained: no extra config files beyond what habitat-lab ships.
5
+ #
6
+ # Usage (from habitat-gs/ root):
7
+ # python -m habitat_baselines.run \
8
+ # --config-name=ddppo_pointnav_gs_eval
9
+
10
+ defaults:
11
+ - /habitat: habitat_config_base
12
+ - /habitat/task: pointnav
13
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
14
+ - /habitat_baselines: habitat_baselines_rl_config_base
15
+ - _self_
16
+
17
+ # ---------- Dataset ----------
18
+ habitat:
19
+ dataset:
20
+ type: PointNav-v1
21
+ split: val
22
+ data_path: data/scene_datasets/gs_scenes/episodes/pointnav/{split}/{split}.json.gz
23
+ scenes_dir: data/scene_datasets
24
+
25
+ # ---------- Environment ----------
26
+ environment:
27
+ max_episode_steps: 500
28
+
29
+ # ---------- Simulator ----------
30
+ simulator:
31
+ scene_dataset: data/scene_datasets/gs_scenes/val.scene_dataset_config.json
32
+ # GS scenes supply pre-built navmeshes; do not recompute from collision mesh
33
+ default_agent_navmesh: False
34
+ agents:
35
+ main_agent:
36
+ sim_sensors:
37
+ rgb_sensor:
38
+ width: 256
39
+ height: 256
40
+ depth_sensor:
41
+ width: 256
42
+ height: 256
43
+
44
+ # ---------- Baselines ----------
45
+ habitat_baselines:
46
+ verbose: False
47
+ evaluate: True
48
+ trainer_name: "ddppo"
49
+ torch_gpu_id: 0
50
+ tensorboard_dir: "tb"
51
+ video_dir: "video_dir"
52
+ test_episode_count: -1
53
+ num_environments: 1
54
+ force_torch_single_threaded: True
55
+ eval_ckpt_path_dir: ""
56
+ # Use this config instead of the checkpoint's saved training config
57
+ load_resume_state_config: False
58
+
59
+ eval:
60
+ video_option: []
61
+ use_ckpt_config: False
62
+
63
+ rl:
64
+ policy:
65
+ main_agent:
66
+ name: "PointNavResNetPolicy"
67
+
68
+ ppo:
69
+ clip_param: 0.2
70
+ ppo_epoch: 2
71
+ num_mini_batch: 2
72
+ value_loss_coef: 0.5
73
+ entropy_coef: 0.01
74
+ lr: 2.5e-4
75
+ eps: 1e-5
76
+ max_grad_norm: 0.2
77
+ num_steps: 128
78
+ use_gae: True
79
+ gamma: 0.99
80
+ tau: 0.95
81
+ use_linear_clip_decay: False
82
+ use_linear_lr_decay: False
83
+ reward_window_size: 50
84
+ use_normalized_advantage: False
85
+ hidden_size: 512
86
+ use_double_buffered_sampler: False
87
+
88
+ ddppo:
89
+ sync_frac: 0.6
90
+ distrib_backend: NCCL
91
+ pretrained_weights: ""
92
+ pretrained: False
93
+ pretrained_encoder: False
94
+ train_encoder: True
95
+ reset_critic: True
96
+ backbone: resnet50
97
+ rnn_type: LSTM
98
+ num_recurrent_layers: 2
configs/ddppo_pointnav_gs_train.yaml ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # DDPPO PointNav training config for Gaussian Splatting scenes.
4
+ # Self-contained: no extra config files beyond what habitat-lab ships.
5
+ #
6
+ # Usage (from habitat-gs/ root):
7
+ # python -m habitat_baselines.run \
8
+ # --config-name=ddppo_pointnav_gs_train
9
+
10
+ defaults:
11
+ - /habitat: habitat_config_base
12
+ - /habitat/task: pointnav
13
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
14
+ - /habitat_baselines: habitat_baselines_rl_config_base
15
+ - _self_
16
+
17
+ # ---------- Dataset ----------
18
+ habitat:
19
+ dataset:
20
+ type: PointNav-v1
21
+ split: train
22
+ data_path: data/scene_datasets/gs_scenes/episodes/pointnav/{split}/{split}.json.gz
23
+ scenes_dir: data/scene_datasets
24
+
25
+ # ---------- Environment ----------
26
+ environment:
27
+ max_episode_steps: 500
28
+
29
+ # ---------- Simulator ----------
30
+ simulator:
31
+ scene_dataset: data/scene_datasets/gs_scenes/train.scene_dataset_config.json
32
+ # GS scenes supply pre-built navmeshes; do not recompute from collision mesh
33
+ default_agent_navmesh: False
34
+ agents:
35
+ main_agent:
36
+ sim_sensors:
37
+ rgb_sensor:
38
+ width: 256
39
+ height: 256
40
+ depth_sensor:
41
+ width: 256
42
+ height: 256
43
+
44
+ # ---------- Baselines ----------
45
+ habitat_baselines:
46
+ verbose: False
47
+ trainer_name: "ddppo"
48
+ torch_gpu_id: 0
49
+ tensorboard_dir: "tb"
50
+ video_dir: "video_dir"
51
+ test_episode_count: -1
52
+ eval_ckpt_path_dir: "data/new_checkpoints"
53
+ num_environments: 4
54
+ checkpoint_folder: "data/new_checkpoints"
55
+ num_updates: -1
56
+ total_num_steps: 5e8
57
+ log_interval: 10
58
+ num_checkpoints: 100
59
+ force_torch_single_threaded: True
60
+
61
+ rl:
62
+ policy:
63
+ main_agent:
64
+ name: "PointNavResNetPolicy"
65
+
66
+ ppo:
67
+ clip_param: 0.2
68
+ ppo_epoch: 2
69
+ num_mini_batch: 2
70
+ value_loss_coef: 0.5
71
+ entropy_coef: 0.01
72
+ lr: 2.5e-4
73
+ eps: 1e-5
74
+ max_grad_norm: 0.2
75
+ num_steps: 128
76
+ use_gae: True
77
+ gamma: 0.99
78
+ tau: 0.95
79
+ use_linear_clip_decay: False
80
+ use_linear_lr_decay: False
81
+ reward_window_size: 50
82
+ use_normalized_advantage: False
83
+ hidden_size: 512
84
+ use_double_buffered_sampler: False
85
+
86
+ ddppo:
87
+ sync_frac: 0.6
88
+ distrib_backend: NCCL
89
+ pretrained_weights: ""
90
+ pretrained: False
91
+ pretrained_encoder: False
92
+ train_encoder: True
93
+ reset_critic: True
94
+ backbone: resnet50
95
+ rnn_type: LSTM
96
+ num_recurrent_layers: 2
configs/vln_gs_eval.yaml ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # StreamVLN evaluation config for Gaussian Splatting scenes.
4
+ # Used by StreamVLN evaluation (streamvln_eval.py).
5
+ #
6
+ # Note: StreamVLN training does NOT use habitat configs — it is pure
7
+ # transformer SFT via streamvln_train.py (see scripts_gs/train_vln.sh).
8
+ #
9
+ # Sensor/navigation parameters match StreamVLN defaults:
10
+ # RGB/Depth: 640x480, hfov=79
11
+ # forward_step_size=0.25m, turn_angle=15deg
12
+ # success_distance=3.0m
13
+
14
+ defaults:
15
+ - /habitat: habitat_config_base
16
+ - /habitat/task: vln_r2r
17
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgbd_agent
18
+ - /habitat/task/lab_sensors:
19
+ - gps_sensor
20
+ - compass_sensor
21
+ - _self_
22
+
23
+ # ---------- Dataset ----------
24
+ habitat:
25
+ dataset:
26
+ type: R2RVLN-v1
27
+ split: val
28
+ scenes_dir: data/scene_datasets/
29
+ data_path: data/scene_datasets/gs_scenes/episodes/vln/{split}/{split}.json.gz
30
+
31
+ # ---------- Environment ----------
32
+ environment:
33
+ max_episode_steps: 500
34
+ iterator_options:
35
+ max_scene_repeat_steps: 50000
36
+ shuffle: False
37
+
38
+ # ---------- Simulator ----------
39
+ simulator:
40
+ scene_dataset: data/scene_datasets/gs_scenes/val.scene_dataset_config.json
41
+ default_agent_navmesh: False
42
+ agents:
43
+ main_agent:
44
+ sim_sensors:
45
+ rgb_sensor:
46
+ width: 640
47
+ height: 480
48
+ hfov: 79
49
+ depth_sensor:
50
+ width: 640
51
+ height: 480
52
+ hfov: 79
53
+ min_depth: 0.0
54
+ max_depth: 10.0
55
+ forward_step_size: 0.25
56
+ turn_angle: 15
57
+ habitat_sim_v0:
58
+ gpu_device_id: 0
59
+
60
+ # ---------- Task / Metrics ----------
61
+ task:
62
+ measurements:
63
+ distance_to_goal:
64
+ type: DistanceToGoal
65
+ distance_to: POINT
66
+ success:
67
+ type: Success
68
+ success_distance: 3.0
69
+ spl:
70
+ type: SPL
71
+ oracle_success:
72
+ type: OracleSuccess
73
+ oracle_navigation_error:
74
+ type: OracleNavigationError
configs/vln_uninavid_gs_eval.yaml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ #
3
+ # Uni-NaVid evaluation config for Gaussian Splatting scenes.
4
+ #
5
+ # Sensor/navigation parameters match standard Uni-NaVid:
6
+ # RGB: 640x480, hfov=120
7
+ # forward_step_size=0.25m, turn_angle=30deg
8
+ # success_distance=3.0m
9
+
10
+ defaults:
11
+ - /habitat: habitat_config_base
12
+ - /habitat/task: vln_r2r
13
+ - /habitat/simulator/sensor_setups@habitat.simulator.agents.main_agent: rgb_agent
14
+ - /habitat/task/lab_sensors:
15
+ - gps_sensor
16
+ - compass_sensor
17
+ - _self_
18
+
19
+ # ---------- Dataset ----------
20
+ habitat:
21
+ dataset:
22
+ type: R2RVLN-v1
23
+ split: val
24
+ scenes_dir: data/scene_datasets/
25
+ data_path: data/scene_datasets/gs_scenes/episodes/vln/{split}/{split}.json.gz
26
+
27
+ # ---------- Environment ----------
28
+ environment:
29
+ max_episode_steps: 500
30
+ iterator_options:
31
+ max_scene_repeat_steps: 50000
32
+ shuffle: False
33
+
34
+ # ---------- Simulator ----------
35
+ simulator:
36
+ scene_dataset: data/scene_datasets/gs_scenes/val.scene_dataset_config.json
37
+ default_agent_navmesh: False
38
+ agents:
39
+ main_agent:
40
+ sim_sensors:
41
+ rgb_sensor:
42
+ width: 640
43
+ height: 480
44
+ hfov: 120
45
+ forward_step_size: 0.25
46
+ turn_angle: 30
47
+ habitat_sim_v0:
48
+ gpu_device_id: 0
49
+
50
+ # ---------- Task / Metrics ----------
51
+ task:
52
+ measurements:
53
+ distance_to_goal:
54
+ type: DistanceToGoal
55
+ distance_to: POINT
56
+ success:
57
+ type: Success
58
+ success_distance: 3.0
59
+ spl:
60
+ type: SPL