Gavin1411 committed on
Commit
0ab77f4
·
verified ·
1 Parent(s): e92526d

Upload policy weights, train config and readme

Browse files
Files changed (5) hide show
  1. README.md +70 -0
  2. adapter_config.json +43 -0
  3. adapter_model.safetensors +3 -0
  4. config.json +87 -0
  5. train_config.json +289 -0
README.md ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ datasets: HuggingFaceVLA/libero
3
+ library_name: lerobot
4
+ license: apache-2.0
5
+ model_name: pi05
6
+ pipeline_tag: robotics
7
+ tags:
8
+ - lerobot
9
+ - robotics
10
+ - pi05
11
+ ---
12
+
13
+ # Model Card for pi05
14
+
15
+ <!-- Provide a quick summary of what the model is/does. -->
16
+
17
+
18
+ **π₀.₅ (Pi05) Policy**
19
+
20
+ π₀.₅ is a Vision-Language-Action model with open-world generalization, from Physical Intelligence. The LeRobot implementation is adapted from their open source OpenPI repository.
21
+
22
+ **Model Overview**
23
+
24
+ π₀.₅ represents a significant evolution from π₀, developed by Physical Intelligence to address a big challenge in robotics: open-world generalization. While robots can perform impressive tasks in controlled environments, π₀.₅ is designed to generalize to entirely new environments and situations that were never seen during training.
25
+
26
+ For more details, see the [Physical Intelligence π₀.₅ blog post](https://www.physicalintelligence.company/blog/pi05).
27
+
28
+
29
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
30
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
31
+
32
+ ---
33
+
34
+ ## How to Get Started with the Model
35
+
36
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
37
+ Below is the short version on how to train and run inference/eval:
38
+
39
+ ### Train from scratch
40
+
41
+ ```bash
42
+ lerobot-train \
43
+ --dataset.repo_id=${HF_USER}/<dataset> \
44
+ --policy.type=pi05 \
45
+ --output_dir=outputs/train/<desired_policy_repo_id> \
46
+ --job_name=lerobot_training \
47
+ --policy.device=cuda \
48
+ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
49
+ --wandb.enable=true
50
+ ```
51
+
52
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
53
+
54
+ ### Evaluate the policy/run inference
55
+
56
+ ```bash
57
+ lerobot-record \
58
+ --robot.type=so100_follower \
59
+ --dataset.repo_id=<hf_user>/eval_<dataset> \
60
+ --policy.path=<hf_user>/<desired_policy_repo_id> \
61
+ --episodes=10
62
+ ```
63
+
64
+ Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint.
65
+
66
+ ---
67
+
68
+ ## Model Details
69
+
70
+ - **License:** apache-2.0
adapter_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": {
6
+ "base_model_class": "PI05Policy",
7
+ "parent_library": "lerobot.policies.pi05.modeling_pi05"
8
+ },
9
+ "base_model_name_or_path": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task31/train/checkpoints/last/pretrained_model",
10
+ "bias": "none",
11
+ "corda_config": null,
12
+ "ensure_weight_tying": false,
13
+ "eva_config": null,
14
+ "exclude_modules": null,
15
+ "fan_in_fan_out": false,
16
+ "inference_mode": true,
17
+ "init_lora_weights": true,
18
+ "layer_replication": null,
19
+ "layers_pattern": null,
20
+ "layers_to_transform": null,
21
+ "loftq_config": {},
22
+ "lora_alpha": 8,
23
+ "lora_bias": false,
24
+ "lora_dropout": 0.0,
25
+ "megatron_config": null,
26
+ "megatron_core": "megatron.core",
27
+ "modules_to_save": [
28
+ ".*\\.vision_tower\\..*"
29
+ ],
30
+ "peft_type": "LORA",
31
+ "peft_version": "0.18.1",
32
+ "qalora_group_size": 16,
33
+ "r": 16,
34
+ "rank_pattern": {},
35
+ "revision": null,
36
+ "target_modules": ".*\\.paligemma\\.model\\.language_model\\..*\\.self_attn\\.(q|v)_proj|.*\\.gemma_expert\\..*\\.self_attn\\.(q|v)_proj|model\\.(state_proj|action_in_proj|action_out_proj|action_time_mlp_in|action_time_mlp_out)",
37
+ "target_parameters": null,
38
+ "task_type": null,
39
+ "trainable_token_indices": null,
40
+ "use_dora": false,
41
+ "use_qalora": false,
42
+ "use_rslora": false
43
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bac9ac47d9adb81c1a0331e9e0c46c523b9d3afb10f21a8c23195610fa52ff56
3
+ size 12547808
config.json ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "type": "pi05",
3
+ "n_obs_steps": 1,
4
+ "input_features": {
5
+ "observation.images.image": {
6
+ "type": "VISUAL",
7
+ "shape": [
8
+ 3,
9
+ 256,
10
+ 256
11
+ ]
12
+ },
13
+ "observation.images.image2": {
14
+ "type": "VISUAL",
15
+ "shape": [
16
+ 3,
17
+ 256,
18
+ 256
19
+ ]
20
+ },
21
+ "observation.state": {
22
+ "type": "STATE",
23
+ "shape": [
24
+ 8
25
+ ]
26
+ }
27
+ },
28
+ "output_features": {
29
+ "action": {
30
+ "type": "ACTION",
31
+ "shape": [
32
+ 7
33
+ ]
34
+ }
35
+ },
36
+ "device": "cuda",
37
+ "use_amp": false,
38
+ "use_peft": true,
39
+ "push_to_hub": true,
40
+ "repo_id": "Gavin1411/task32_train",
41
+ "private": null,
42
+ "tags": null,
43
+ "license": null,
44
+ "pretrained_path": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task31/train/checkpoints/last/pretrained_model",
45
+ "paligemma_variant": "gemma_2b",
46
+ "action_expert_variant": "gemma_300m",
47
+ "dtype": "bfloat16",
48
+ "chunk_size": 50,
49
+ "n_action_steps": 50,
50
+ "max_state_dim": 32,
51
+ "max_action_dim": 32,
52
+ "num_inference_steps": 10,
53
+ "time_sampling_beta_alpha": 1.5,
54
+ "time_sampling_beta_beta": 1.0,
55
+ "time_sampling_scale": 0.999,
56
+ "time_sampling_offset": 0.001,
57
+ "min_period": 0.004,
58
+ "max_period": 4.0,
59
+ "rtc_config": null,
60
+ "image_resolution": [
61
+ 224,
62
+ 224
63
+ ],
64
+ "empty_cameras": 0,
65
+ "tokenizer_max_length": 200,
66
+ "normalization_mapping": {
67
+ "VISUAL": "IDENTITY",
68
+ "STATE": "QUANTILES",
69
+ "ACTION": "QUANTILES"
70
+ },
71
+ "gradient_checkpointing": true,
72
+ "compile_model": false,
73
+ "compile_mode": "max-autotune",
74
+ "freeze_vision_encoder": false,
75
+ "train_expert_only": false,
76
+ "optimizer_lr": 2.5e-05,
77
+ "optimizer_betas": [
78
+ 0.9,
79
+ 0.95
80
+ ],
81
+ "optimizer_eps": 1e-08,
82
+ "optimizer_weight_decay": 0.01,
83
+ "optimizer_grad_clip_norm": 1.0,
84
+ "scheduler_warmup_steps": 1000,
85
+ "scheduler_decay_steps": 30000,
86
+ "scheduler_decay_lr": 2.5e-06
87
+ }
train_config.json ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dataset": {
3
+ "repo_id": "HuggingFaceVLA/libero",
4
+ "root": null,
5
+ "episodes": [
6
+ 32,
7
+ 70,
8
+ 71,
9
+ 72,
10
+ 111,
11
+ 112,
12
+ 152,
13
+ 192,
14
+ 231,
15
+ 232,
16
+ 271,
17
+ 272,
18
+ 310,
19
+ 312,
20
+ 350,
21
+ 352,
22
+ 392,
23
+ 432,
24
+ 472,
25
+ 512,
26
+ 552,
27
+ 590,
28
+ 592,
29
+ 630,
30
+ 632,
31
+ 672,
32
+ 710,
33
+ 712,
34
+ 752,
35
+ 792,
36
+ 832,
37
+ 872,
38
+ 912,
39
+ 952,
40
+ 992,
41
+ 1032,
42
+ 1072,
43
+ 1111,
44
+ 1112,
45
+ 1152,
46
+ 1192,
47
+ 1232,
48
+ 1272,
49
+ 1312,
50
+ 1352,
51
+ 1391,
52
+ 1392,
53
+ 1432,
54
+ 1472,
55
+ 1511,
56
+ 1512,
57
+ 1552,
58
+ 1592,
59
+ 1630,
60
+ 1632,
61
+ 1672
62
+ ],
63
+ "image_transforms": {
64
+ "enable": false,
65
+ "max_num_transforms": 3,
66
+ "random_order": false,
67
+ "tfs": {
68
+ "brightness": {
69
+ "weight": 1.0,
70
+ "type": "ColorJitter",
71
+ "kwargs": {
72
+ "brightness": [
73
+ 0.8,
74
+ 1.2
75
+ ]
76
+ }
77
+ },
78
+ "contrast": {
79
+ "weight": 1.0,
80
+ "type": "ColorJitter",
81
+ "kwargs": {
82
+ "contrast": [
83
+ 0.8,
84
+ 1.2
85
+ ]
86
+ }
87
+ },
88
+ "saturation": {
89
+ "weight": 1.0,
90
+ "type": "ColorJitter",
91
+ "kwargs": {
92
+ "saturation": [
93
+ 0.5,
94
+ 1.5
95
+ ]
96
+ }
97
+ },
98
+ "hue": {
99
+ "weight": 1.0,
100
+ "type": "ColorJitter",
101
+ "kwargs": {
102
+ "hue": [
103
+ -0.05,
104
+ 0.05
105
+ ]
106
+ }
107
+ },
108
+ "sharpness": {
109
+ "weight": 1.0,
110
+ "type": "SharpnessJitter",
111
+ "kwargs": {
112
+ "sharpness": [
113
+ 0.5,
114
+ 1.5
115
+ ]
116
+ }
117
+ },
118
+ "affine": {
119
+ "weight": 1.0,
120
+ "type": "RandomAffine",
121
+ "kwargs": {
122
+ "degrees": [
123
+ -5.0,
124
+ 5.0
125
+ ],
126
+ "translate": [
127
+ 0.05,
128
+ 0.05
129
+ ]
130
+ }
131
+ }
132
+ }
133
+ },
134
+ "revision": null,
135
+ "use_imagenet_stats": true,
136
+ "video_backend": "torchcodec",
137
+ "streaming": false
138
+ },
139
+ "env": null,
140
+ "policy": {
141
+ "type": "pi05",
142
+ "n_obs_steps": 1,
143
+ "input_features": {
144
+ "observation.images.image": {
145
+ "type": "VISUAL",
146
+ "shape": [
147
+ 3,
148
+ 256,
149
+ 256
150
+ ]
151
+ },
152
+ "observation.images.image2": {
153
+ "type": "VISUAL",
154
+ "shape": [
155
+ 3,
156
+ 256,
157
+ 256
158
+ ]
159
+ },
160
+ "observation.state": {
161
+ "type": "STATE",
162
+ "shape": [
163
+ 8
164
+ ]
165
+ }
166
+ },
167
+ "output_features": {
168
+ "action": {
169
+ "type": "ACTION",
170
+ "shape": [
171
+ 7
172
+ ]
173
+ }
174
+ },
175
+ "device": "cuda",
176
+ "use_amp": false,
177
+ "use_peft": true,
178
+ "push_to_hub": true,
179
+ "repo_id": "Gavin1411/task32_train",
180
+ "private": null,
181
+ "tags": null,
182
+ "license": null,
183
+ "pretrained_path": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task31/train/checkpoints/last/pretrained_model",
184
+ "paligemma_variant": "gemma_2b",
185
+ "action_expert_variant": "gemma_300m",
186
+ "dtype": "bfloat16",
187
+ "chunk_size": 50,
188
+ "n_action_steps": 50,
189
+ "max_state_dim": 32,
190
+ "max_action_dim": 32,
191
+ "num_inference_steps": 10,
192
+ "time_sampling_beta_alpha": 1.5,
193
+ "time_sampling_beta_beta": 1.0,
194
+ "time_sampling_scale": 0.999,
195
+ "time_sampling_offset": 0.001,
196
+ "min_period": 0.004,
197
+ "max_period": 4.0,
198
+ "rtc_config": null,
199
+ "image_resolution": [
200
+ 224,
201
+ 224
202
+ ],
203
+ "empty_cameras": 0,
204
+ "tokenizer_max_length": 200,
205
+ "normalization_mapping": {
206
+ "VISUAL": "IDENTITY",
207
+ "STATE": "QUANTILES",
208
+ "ACTION": "QUANTILES"
209
+ },
210
+ "gradient_checkpointing": true,
211
+ "compile_model": false,
212
+ "compile_mode": "max-autotune",
213
+ "freeze_vision_encoder": false,
214
+ "train_expert_only": false,
215
+ "optimizer_lr": 2.5e-05,
216
+ "optimizer_betas": [
217
+ 0.9,
218
+ 0.95
219
+ ],
220
+ "optimizer_eps": 1e-08,
221
+ "optimizer_weight_decay": 0.01,
222
+ "optimizer_grad_clip_norm": 1.0,
223
+ "scheduler_warmup_steps": 1000,
224
+ "scheduler_decay_steps": 30000,
225
+ "scheduler_decay_lr": 2.5e-06
226
+ },
227
+ "output_dir": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task32/train",
228
+ "job_name": "pi05",
229
+ "resume": false,
230
+ "seed": 42,
231
+ "cudnn_deterministic": false,
232
+ "num_workers": 4,
233
+ "batch_size": 8,
234
+ "steps": 10000,
235
+ "eval_freq": 20000,
236
+ "log_freq": 100,
237
+ "tolerance_s": 0.0001,
238
+ "save_checkpoint": true,
239
+ "save_freq": 5000,
240
+ "use_policy_training_preset": true,
241
+ "optimizer": {
242
+ "type": "adamw",
243
+ "lr": 2.5e-05,
244
+ "weight_decay": 0.01,
245
+ "grad_clip_norm": 1.0,
246
+ "betas": [
247
+ 0.9,
248
+ 0.95
249
+ ],
250
+ "eps": 1e-08
251
+ },
252
+ "scheduler": {
253
+ "type": "cosine_decay_with_warmup",
254
+ "num_warmup_steps": 1000,
255
+ "num_decay_steps": 30000,
256
+ "peak_lr": 2.5e-05,
257
+ "decay_lr": 2.5e-06
258
+ },
259
+ "eval": {
260
+ "n_episodes": 50,
261
+ "batch_size": 50,
262
+ "use_async_envs": false
263
+ },
264
+ "wandb": {
265
+ "enable": false,
266
+ "disable_artifact": false,
267
+ "project": "lerobot",
268
+ "entity": null,
269
+ "notes": null,
270
+ "run_id": null,
271
+ "mode": null
272
+ },
273
+ "peft": {
274
+ "target_modules": ".*\\.paligemma\\.model\\.language_model\\..*\\.self_attn\\.(q|v)_proj|.*\\.gemma_expert\\..*\\.self_attn\\.(q|v)_proj|model\\.(state_proj|action_in_proj|action_out_proj|action_time_mlp_in|action_time_mlp_out)",
275
+ "full_training_modules": [
276
+ ".*\\.vision_tower\\..*"
277
+ ],
278
+ "method_type": "LORA",
279
+ "init_type": null,
280
+ "r": 16
281
+ },
282
+ "use_rabc": false,
283
+ "rabc_progress_path": null,
284
+ "rabc_kappa": 0.01,
285
+ "rabc_epsilon": 1e-06,
286
+ "rabc_head_mode": "sparse",
287
+ "rename_map": {},
288
+ "checkpoint_path": null
289
+ }