Inishds committed (verified)
Commit f8c35bf · Parent: 7a27ba0

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +63 -0
  2. config.json +99 -0
  3. model.safetensors +3 -0
  4. train_config.json +211 -0
README.md ADDED
---
base_model: lerobot/smolvla_base
datasets: aopolin-lv/libero_object_no_noops_lerobot_v21
library_name: lerobot
license: apache-2.0
model_name: smolvla
pipeline_tag: robotics
tags:
- robotics
- smolvla
- lerobot
---

# Model Card for smolvla

<!-- Provide a quick summary of what the model is/does. -->

[SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware.

This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).

---

## How to Get Started with the Model

For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
Below is the short version of how to train the policy and run inference/evaluation:

### Train from scratch

```bash
python -m lerobot.scripts.train \
  --dataset.repo_id=${HF_USER}/<dataset> \
  --policy.type=smolvla \
  --output_dir=outputs/train/<desired_policy_repo_id> \
  --job_name=lerobot_training \
  --policy.device=cuda \
  --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
  --wandb.enable=true
```

_Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._

### Evaluate the policy/run inference

```bash
python -m lerobot.record \
  --robot.type=so100_follower \
  --dataset.repo_id=<hf_user>/eval_<dataset> \
  --policy.path=<hf_user>/<desired_policy_repo_id> \
  --episodes=10
```

Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or Hub checkpoint.
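
If you prefer to call the policy directly from Python rather than through `lerobot.record`, the sketch below is a minimal, illustrative example and is not part of the original card. The import path, the `from_pretrained`/`select_action` API, and the batch keys are assumptions based on recent LeRobot releases and on the features declared in `config.json`; adjust them to your installed version and robot.

```python
# Illustrative inference sketch (assumptions noted above; not the card's official recipe).
import torch
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy  # path may differ per release

device = "cuda" if torch.cuda.is_available() else "cpu"
policy = SmolVLAPolicy.from_pretrained("Inishds/smolvla_adaptor_object").to(device)
policy.eval()

# Dummy observations shaped like the trained features: two 256x256 RGB cameras
# (channel-first tensors assumed), an 8-dim state, and a language instruction.
batch = {
    "observation.images.image": torch.zeros(1, 3, 256, 256, device=device),
    "observation.images.wrist_image": torch.zeros(1, 3, 256, 256, device=device),
    "observation.state": torch.zeros(1, 8, device=device),
    "task": ["pick up the object and place it in the basket"],
}

with torch.inference_mode():
    action = policy.select_action(batch)  # expected shape (1, 7), per output_features

print(action.shape)
```

In practice, `lerobot.record` handles camera capture, normalization, and action chunking for you; the snippet only illustrates how the declared features map onto a single policy call.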

---

## Model Details

- **License:** apache-2.0
config.json ADDED
{
    "type": "smolvla",
    "n_obs_steps": 1,
    "normalization_mapping": {
        "VISUAL": "IDENTITY",
        "STATE": "MEAN_STD",
        "ACTION": "MEAN_STD"
    },
    "input_features": {
        "observation.images.image": {
            "type": "VISUAL",
            "shape": [
                256,
                256,
                3
            ]
        },
        "observation.images.wrist_image": {
            "type": "VISUAL",
            "shape": [
                256,
                256,
                3
            ]
        },
        "observation.state": {
            "type": "STATE",
            "shape": [
                8
            ]
        }
    },
    "output_features": {
        "action": {
            "type": "ACTION",
            "shape": [
                7
            ]
        }
    },
    "device": "cuda",
    "use_amp": false,
    "use_lora": true,
    "lora_config": {
        "r": 8,
        "lora_alpha": 16,
        "lora_dropout": 0.05
    },
    "push_lora_adapter_to_hub": true,
    "lora_adapter_repo_id": "Inishds/smolvla_adaptor",
    "lora_base_model": "lerobot/smolvla_base",
    "lora_adapter_private": false,
    "lora_adapter_token": null,
    "push_to_hub": true,
    "repo_id": "Inishds/smolvla_adaptor_object",
    "private": null,
    "tags": null,
    "license": null,
    "chunk_size": 50,
    "n_action_steps": 50,
    "max_state_dim": 32,
    "max_action_dim": 32,
    "resize_imgs_with_padding": [
        512,
        512
    ],
    "empty_cameras": 0,
    "adapt_to_pi_aloha": false,
    "use_delta_joint_actions_aloha": false,
    "tokenizer_max_length": 48,
    "num_steps": 10,
    "use_cache": true,
    "freeze_vision_encoder": true,
    "train_expert_only": true,
    "train_state_proj": true,
    "optimizer_lr": 0.0001,
    "optimizer_betas": [
        0.9,
        0.95
    ],
    "optimizer_eps": 1e-08,
    "optimizer_weight_decay": 1e-10,
    "optimizer_grad_clip_norm": 10.0,
    "scheduler_warmup_steps": 1000,
    "scheduler_decay_steps": 30000,
    "scheduler_decay_lr": 2.5e-06,
    "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
    "load_vlm_weights": true,
    "add_image_special_tokens": false,
    "attention_mode": "cross_attn",
    "prefix_length": 0,
    "pad_language_to": "max_length",
    "num_expert_layers": 0,
    "num_vlm_layers": 16,
    "self_attn_every_n_layers": 2,
    "expert_width_multiplier": 0.75,
    "min_period": 0.004,
    "max_period": 4.0
}
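
The `input_features`/`output_features` blocks above define what the policy consumes and produces: two 256x256x3 cameras, an 8-dim proprioceptive state, and a 7-dim action. The helper below is a hypothetical sanity check, not part of this commit; it reads `config.json` with the standard library and compares your own pipeline's shapes against the declared features before you launch evaluation.

```python
# Hypothetical pre-flight check (not shipped with this repo): compare pipeline shapes
# against the feature shapes declared in config.json.
import json

import numpy as np

with open("config.json") as f:
    cfg = json.load(f)

expected = {name: tuple(feat["shape"]) for name, feat in cfg["input_features"].items()}
expected["action"] = tuple(cfg["output_features"]["action"]["shape"])
print(expected)  # {'observation.images.image': (256, 256, 3), ..., 'action': (7,)}

# Example: verify one observation coming out of your own pipeline (dummy data here).
state = np.zeros(8, dtype=np.float32)
assert state.shape == expected["observation.state"], (
    f"state shape {state.shape} != expected {expected['observation.state']}"
)
```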
model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:b14490a4cdaa65e5f0af46afe75928e72e7c8f32e01529dbbba68e1712ad403f
size 919242040
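
The file stored in the repo is a Git LFS pointer; the actual weights resolve to the sha256 and size above. If you want to verify a local download against that pointer, a standard-library check like the following sketch works (the local filename is an assumption):

```python
# Verify a downloaded model.safetensors against the Git LFS pointer (sha256 oid and size).
import hashlib
import os

PATH = "model.safetensors"  # assumed local download path
EXPECTED_SHA256 = "b14490a4cdaa65e5f0af46afe75928e72e7c8f32e01529dbbba68e1712ad403f"
EXPECTED_SIZE = 919_242_040

h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```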
train_config.json ADDED
{
    "dataset": {
        "repo_id": "aopolin-lv/libero_object_no_noops_lerobot_v21",
        "root": null,
        "episodes": null,
        "image_transforms": {
            "enable": false,
            "max_num_transforms": 3,
            "random_order": false,
            "tfs": {
                "brightness": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "brightness": [
                            0.8,
                            1.2
                        ]
                    }
                },
                "contrast": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "contrast": [
                            0.8,
                            1.2
                        ]
                    }
                },
                "saturation": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "saturation": [
                            0.5,
                            1.5
                        ]
                    }
                },
                "hue": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "hue": [
                            -0.05,
                            0.05
                        ]
                    }
                },
                "sharpness": {
                    "weight": 1.0,
                    "type": "SharpnessJitter",
                    "kwargs": {
                        "sharpness": [
                            0.5,
                            1.5
                        ]
                    }
                }
            }
        },
        "revision": null,
        "use_imagenet_stats": true,
        "video_backend": "torchcodec"
    },
    "env": null,
    "policy": {
        "type": "smolvla",
        "n_obs_steps": 1,
        "normalization_mapping": {
            "VISUAL": "IDENTITY",
            "STATE": "MEAN_STD",
            "ACTION": "MEAN_STD"
        },
        "input_features": {
            "observation.images.image": {
                "type": "VISUAL",
                "shape": [
                    256,
                    256,
                    3
                ]
            },
            "observation.images.wrist_image": {
                "type": "VISUAL",
                "shape": [
                    256,
                    256,
                    3
                ]
            },
            "observation.state": {
                "type": "STATE",
                "shape": [
                    8
                ]
            }
        },
        "output_features": {
            "action": {
                "type": "ACTION",
                "shape": [
                    7
                ]
            }
        },
        "device": "cuda",
        "use_amp": false,
        "use_lora": true,
        "lora_config": {
            "r": 8,
            "lora_alpha": 16,
            "lora_dropout": 0.05
        },
        "push_lora_adapter_to_hub": true,
        "lora_adapter_repo_id": "Inishds/smolvla_adaptor",
        "lora_base_model": "lerobot/smolvla_base",
        "lora_adapter_private": false,
        "lora_adapter_token": null,
        "push_to_hub": true,
        "repo_id": "Inishds/smolvla_adaptor_object",
        "private": null,
        "tags": null,
        "license": null,
        "chunk_size": 50,
        "n_action_steps": 50,
        "max_state_dim": 32,
        "max_action_dim": 32,
        "resize_imgs_with_padding": [
            512,
            512
        ],
        "empty_cameras": 0,
        "adapt_to_pi_aloha": false,
        "use_delta_joint_actions_aloha": false,
        "tokenizer_max_length": 48,
        "num_steps": 10,
        "use_cache": true,
        "freeze_vision_encoder": true,
        "train_expert_only": true,
        "train_state_proj": true,
        "optimizer_lr": 0.0001,
        "optimizer_betas": [
            0.9,
            0.95
        ],
        "optimizer_eps": 1e-08,
        "optimizer_weight_decay": 1e-10,
        "optimizer_grad_clip_norm": 10.0,
        "scheduler_warmup_steps": 1000,
        "scheduler_decay_steps": 30000,
        "scheduler_decay_lr": 2.5e-06,
        "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
        "load_vlm_weights": true,
        "add_image_special_tokens": false,
        "attention_mode": "cross_attn",
        "prefix_length": 0,
        "pad_language_to": "max_length",
        "num_expert_layers": 0,
        "num_vlm_layers": 16,
        "self_attn_every_n_layers": 2,
        "expert_width_multiplier": 0.75,
        "min_period": 0.004,
        "max_period": 4.0
    },
    "output_dir": "outputs/train/my_smolvla14",
    "job_name": "my_smolvla_training",
    "resume": false,
    "seed": 1000,
    "num_workers": 4,
    "batch_size": 64,
    "steps": 60000,
    "eval_freq": 2,
    "log_freq": 200,
    "save_checkpoint": true,
    "save_freq": 20000,
    "use_policy_training_preset": true,
    "optimizer": {
        "type": "adamw",
        "lr": 0.0001,
        "weight_decay": 1e-10,
        "grad_clip_norm": 10.0,
        "betas": [
            0.9,
            0.95
        ],
        "eps": 1e-08
    },
    "scheduler": {
        "type": "cosine_decay_with_warmup",
        "num_warmup_steps": 1000,
        "num_decay_steps": 30000,
        "peak_lr": 0.0001,
        "decay_lr": 2.5e-06
    },
    "eval": {
        "n_episodes": 50,
        "batch_size": 50,
        "use_async_envs": false
    },
    "wandb": {
        "enable": false,
        "disable_artifact": false,
        "project": "lerobot",
        "entity": null,
        "notes": null,
        "run_id": null,
        "mode": null
    }
}
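
The `optimizer` and `scheduler` blocks determine the learning-rate trajectory: linear warmup to the 1e-4 peak over 1,000 steps, cosine decay down to 2.5e-6 around step 30,000, then a constant floor for the rest of the 60,000-step run. The snippet below is an illustrative sketch of that shape, not LeRobot's `cosine_decay_with_warmup` implementation; the exact boundary conventions (e.g., whether decay steps count from step 0 or from the end of warmup) may differ.

```python
# Illustrative LR trajectory implied by the scheduler block above (conventions assumed).
import math

PEAK_LR, DECAY_LR = 1e-4, 2.5e-6
WARMUP, DECAY = 1_000, 30_000

def lr_at(step: int) -> float:
    if step < WARMUP:                      # linear warmup to peak_lr
        return PEAK_LR * step / WARMUP
    if step < DECAY:                       # cosine decay from peak_lr to decay_lr
        progress = (step - WARMUP) / (DECAY - WARMUP)
        return DECAY_LR + 0.5 * (PEAK_LR - DECAY_LR) * (1 + math.cos(math.pi * progress))
    return DECAY_LR                        # constant floor for the remaining steps

for s in (0, 500, 1_000, 15_000, 30_000, 60_000):
    print(s, f"{lr_at(s):.2e}")
```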