Upload policy weights, train config and readme

Files changed:
- README.md +1 -1
- adapter_config.json +5 -3
- adapter_model.safetensors +2 -2
- config.json +1 -1
- train_config.json +13 -4
README.md CHANGED

@@ -6,8 +6,8 @@ model_name: pi05
 pipeline_tag: robotics
 tags:
 - pi05
-- lerobot
 - robotics
+- lerobot
 ---

 # Model Card for pi05
adapter_config.json CHANGED

@@ -6,7 +6,7 @@
     "base_model_class": "PI05Policy",
     "parent_library": "lerobot.policies.pi05.modeling_pi05"
   },
-  "base_model_name_or_path": "/data/taojiachen/Continuous_VLA/exp/
+  "base_model_name_or_path": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task30/train/checkpoints/last/pretrained_model",
   "bias": "none",
   "corda_config": null,
   "ensure_weight_tying": false,
@@ -24,14 +24,16 @@
   "lora_dropout": 0.0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
-  "modules_to_save": [
+  "modules_to_save": [
+    ".*\\.vision_tower\\..*"
+  ],
   "peft_type": "LORA",
   "peft_version": "0.18.1",
   "qalora_group_size": 16,
   "r": 16,
   "rank_pattern": {},
   "revision": null,
-  "target_modules": "(.*\\.gemma_expert\\..*\\.self_attn\\.(q|v)_proj|model\\.(state_proj|action_in_proj|action_out_proj|action_time_mlp_in|action_time_mlp_out)
+  "target_modules": ".*\\.paligemma\\.model\\.language_model\\..*\\.self_attn\\.(q|v)_proj|.*\\.gemma_expert\\..*\\.self_attn\\.(q|v)_proj|model\\.(state_proj|action_in_proj|action_out_proj|action_time_mlp_in|action_time_mlp_out)",
   "target_parameters": null,
   "task_type": null,
   "trainable_token_indices": null,
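When `target_modules` is a single string, PEFT treats it as a regular expression and `re.fullmatch`es it against each submodule's dotted name, so the updated pattern extends LoRA from the Gemma action expert's q/v projections and the action/state projection layers to the PaliGemma language model's q/v projections as well, while the new `modules_to_save` entry marks the vision tower to be trained in full and stored alongside the LoRA weights. A quick sketch of that matching behaviour (the module names below are illustrative, not read from the actual checkpoint):

```python
import re

# target_modules from the updated adapter_config.json. Given a single string,
# PEFT re.fullmatch()es it against each submodule's dotted name to decide
# where to inject LoRA layers.
TARGET = (
    r".*\.paligemma\.model\.language_model\..*\.self_attn\.(q|v)_proj"
    r"|.*\.gemma_expert\..*\.self_attn\.(q|v)_proj"
    r"|model\.(state_proj|action_in_proj|action_out_proj"
    r"|action_time_mlp_in|action_time_mlp_out)"
)

# Illustrative module names (not taken from the checkpoint):
names = [
    "model.paligemma.model.language_model.layers.0.self_attn.q_proj",  # newly matched
    "model.gemma_expert.layers.3.self_attn.v_proj",                    # matched before and after
    "model.action_in_proj",                                            # matched before and after
    "model.paligemma.model.vision_tower.blocks.0.attn.q_proj",         # not matched; covered by modules_to_save
]
for name in names:
    print(f"{'LoRA' if re.fullmatch(TARGET, name) else 'skip':4} {name}")
```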
adapter_model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:08fba12e4837a1641214136229f4683c7abc8350bbee36cbe960d52612a91032
+size 12547808
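The safetensors payload itself lives in Git LFS; the repository only tracks this pointer. A downloaded copy can be checked against the pointer's digest and size with a minimal script (file name as in this repo):

```python
import hashlib
from pathlib import Path

# Expected values from the updated LFS pointer file.
EXPECTED_OID = "08fba12e4837a1641214136229f4683c7abc8350bbee36cbe960d52612a91032"
EXPECTED_SIZE = 12547808  # bytes

data = Path("adapter_model.safetensors").read_bytes()
assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("LFS pointer verified")
```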
config.json CHANGED

@@ -41,7 +41,7 @@
   "private": null,
   "tags": null,
   "license": null,
-  "pretrained_path": "/data/taojiachen/Continuous_VLA/exp/
+  "pretrained_path": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task30/train/checkpoints/last/pretrained_model",
   "paligemma_variant": "gemma_2b",
   "action_expert_variant": "gemma_300m",
   "dtype": "bfloat16",
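`pretrained_path` records the base checkpoint this adapter was trained on top of. Going by the `base_model_class` and `parent_library` stored in `adapter_config.json`, loading the base policy and attaching the adapter would look roughly like the sketch below; treat the `from_pretrained` entry points as an assumption rather than a verified recipe, and note that the base path is machine-local:

```python
# Sketch only: assumes PI05Policy and PeftModel expose the usual
# from_pretrained entry points; the base path is the machine-local
# checkpoint recorded in config.json.
from peft import PeftModel
from lerobot.policies.pi05.modeling_pi05 import PI05Policy

base = PI05Policy.from_pretrained(
    "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task30/train/checkpoints/last/pretrained_model"
)
# Loads adapter_config.json + adapter_model.safetensors from this repo's root.
policy = PeftModel.from_pretrained(base, ".")
```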
train_config.json CHANGED

@@ -4,22 +4,28 @@
     "root": null,
     "episodes": [
       31,
+      70,
       71,
       111,
       151,
       191,
       231,
       271,
+      310,
       311,
+      350,
       351,
       391,
       431,
       471,
       511,
       551,
+      590,
       591,
+      630,
       631,
       671,
+      710,
       711,
       751,
       791,
@@ -43,6 +49,7 @@
       1511,
       1551,
       1591,
+      1630,
       1631,
       1671
     ],
@@ -166,7 +173,7 @@
     "private": null,
     "tags": null,
     "license": null,
-    "pretrained_path": "/data/taojiachen/Continuous_VLA/exp/
+    "pretrained_path": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task30/train/checkpoints/last/pretrained_model",
     "paligemma_variant": "gemma_2b",
     "action_expert_variant": "gemma_300m",
     "dtype": "bfloat16",
@@ -210,7 +217,7 @@
     "scheduler_decay_steps": 30000,
     "scheduler_decay_lr": 2.5e-06
   },
-  "output_dir": "/data/taojiachen/Continuous_VLA/exp/
+  "output_dir": "/data/taojiachen/Continuous_VLA/exp/er/spatial/seed42/task31/train",
   "job_name": "pi05",
  "resume": false,
   "seed": 42,
@@ -257,8 +264,10 @@
     "mode": null
   },
   "peft": {
-    "target_modules":
-    "full_training_modules":
+    "target_modules": ".*\\.paligemma\\.model\\.language_model\\..*\\.self_attn\\.(q|v)_proj|.*\\.gemma_expert\\..*\\.self_attn\\.(q|v)_proj|model\\.(state_proj|action_in_proj|action_out_proj|action_time_mlp_in|action_time_mlp_out)",
+    "full_training_modules": [
+      ".*\\.vision_tower\\..*"
+    ],
     "method_type": "LORA",
     "init_type": null,
     "r": 16
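The `episodes` list pins training to an explicit subset of dataset episode indices; this commit adds 70, 310, 350, 590, 630, 710 and 1630 alongside the existing every-40 selection (31, 71, 111, ...). In lerobot, such a subset is typically handed straight to the dataset constructor; a hedged sketch follows (the repo id is a placeholder, and the import path may differ between lerobot versions):

```python
# Sketch: LeRobotDataset accepts an `episodes` list and loads only those
# episode indices. The repo_id below is a placeholder, not from this commit.
from lerobot.datasets.lerobot_dataset import LeRobotDataset

episodes = [31, 70, 71, 111, 151, 191, 231, 271, 310, 311, 350]  # first entries of train_config.json
dataset = LeRobotDataset("user/placeholder_dataset", episodes=episodes)
print(f"{dataset.num_episodes} episodes, {dataset.num_frames} frames")
```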