shuohsuan committed on
Commit 558b494 · verified · Parent: 503092f

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +67 -0
  2. config.json +88 -0
  3. model.safetensors +3 -0
  4. train_config.json +206 -0
README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ base_model: lerobot/smolvla_base
+ datasets:
+ - shuohsuan/grasp0
+ - shuohsuan/grasp1
+ - shuohsuan/grasp2
+ - shuohsuan/grasp3
+ library_name: lerobot
+ license: apache-2.0
+ model_name: smolvla
+ pipeline_tag: robotics
+ tags:
+ - lerobot
+ - robotics
+ - smolvla
+ ---
+
+ # Model Card for smolvla
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware.
+
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+ ---
+
+ ## How to Get Started with the Model
+
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+ Below is the short version of how to train and run inference/eval:
+
+ ### Train from scratch
+
+ ```bash
+ python -m lerobot.scripts.train \
+   --dataset.repo_id=${HF_USER}/<dataset> \
+   --policy.type=smolvla \
+   --output_dir=outputs/train/<desired_policy_repo_id> \
+   --job_name=lerobot_training \
+   --policy.device=cuda \
+   --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
+   --wandb.enable=true
+ ```
+
+ *Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`.*
+
+ ### Evaluate the policy / run inference
+
+ ```bash
+ python -m lerobot.record \
+   --robot.type=so100_follower \
+   --dataset.repo_id=<hf_user>/eval_<dataset> \
+   --policy.path=<hf_user>/<desired_policy_repo_id> \
+   --episodes=10
+ ```
+
+ Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or Hub checkpoint.
+
+ ---
+
+ ## Model Details
+
+ * **License:** apache-2.0
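
The checkpoint can also be driven directly from Python rather than through the CLI. A minimal sketch, assuming a lerobot version where `SmolVLAPolicy` lives under `lerobot.common.policies.smolvla.modeling_smolvla` (the module path moves between releases), with batch keys taken from the `input_features` in `config.json` below; the `"task"` instruction string is a made-up example:

```python
import torch
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy

# Download and load the policy weights from the Hub.
policy = SmolVLAPolicy.from_pretrained("shuohsuan/svla_grasp0123")
policy.eval()
policy.reset()  # clear any queued action chunk before a new episode

# One dummy observation shaped like this checkpoint expects: a 6-dim robot
# state plus two 3x480x640 RGB cameras ("side" and "wrist").
batch = {
    "observation.state": torch.zeros(1, 6),
    "observation.images.side": torch.zeros(1, 3, 480, 640),
    "observation.images.wrist": torch.zeros(1, 3, 480, 640),
    "task": ["grasp the object"],  # hypothetical language instruction
}
with torch.no_grad():
    action = policy.select_action(batch)  # next 6-dim action from the chunk
print(action.shape)
```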
config.json ADDED
@@ -0,0 +1,88 @@
+ {
+   "type": "smolvla",
+   "n_obs_steps": 1,
+   "normalization_mapping": {
+     "VISUAL": "IDENTITY",
+     "STATE": "MEAN_STD",
+     "ACTION": "MEAN_STD"
+   },
+   "input_features": {
+     "observation.state": {
+       "type": "STATE",
+       "shape": [
+         6
+       ]
+     },
+     "observation.images.side": {
+       "type": "VISUAL",
+       "shape": [
+         3,
+         480,
+         640
+       ]
+     },
+     "observation.images.wrist": {
+       "type": "VISUAL",
+       "shape": [
+         3,
+         480,
+         640
+       ]
+     }
+   },
+   "output_features": {
+     "action": {
+       "type": "ACTION",
+       "shape": [
+         6
+       ]
+     }
+   },
+   "device": "cuda",
+   "use_amp": false,
+   "push_to_hub": true,
+   "repo_id": "shuohsuan/svla_grasp0123",
+   "private": null,
+   "tags": null,
+   "license": null,
+   "chunk_size": 50,
+   "n_action_steps": 50,
+   "max_state_dim": 32,
+   "max_action_dim": 32,
+   "resize_imgs_with_padding": [
+     512,
+     512
+   ],
+   "empty_cameras": 0,
+   "adapt_to_pi_aloha": false,
+   "use_delta_joint_actions_aloha": false,
+   "tokenizer_max_length": 48,
+   "num_steps": 10,
+   "use_cache": true,
+   "freeze_vision_encoder": true,
+   "train_expert_only": true,
+   "train_state_proj": true,
+   "optimizer_lr": 0.0001,
+   "optimizer_betas": [
+     0.9,
+     0.95
+   ],
+   "optimizer_eps": 1e-08,
+   "optimizer_weight_decay": 1e-10,
+   "optimizer_grad_clip_norm": 10.0,
+   "scheduler_warmup_steps": 1000,
+   "scheduler_decay_steps": 30000,
+   "scheduler_decay_lr": 2.5e-06,
+   "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
+   "load_vlm_weights": true,
+   "add_image_special_tokens": false,
+   "attention_mode": "cross_attn",
+   "prefix_length": 0,
+   "pad_language_to": "max_length",
+   "num_expert_layers": 0,
+   "num_vlm_layers": 16,
+   "self_attn_every_n_layers": 2,
+   "expert_width_multiplier": 0.75,
+   "min_period": 0.004,
+   "max_period": 4.0
+ }
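
Two of these options do notable work: `resize_imgs_with_padding: [512, 512]` suggests each camera frame is rescaled to fit 512×512 with aspect-preserving padding before entering the vision encoder, and `chunk_size`/`n_action_steps` = 50 mean each forward pass yields a 50-step action chunk. A sketch of the pad-and-resize idea under that assumption (lerobot's actual implementation may differ in interpolation mode, padding value, or placement):

```python
import torch
import torch.nn.functional as F

def resize_with_padding(img: torch.Tensor, size: int = 512) -> torch.Tensor:
    """Scale a (C, H, W) image so its long side equals `size`, then zero-pad
    the short side to a square (C, size, size) tensor."""
    _, h, w = img.shape
    scale = size / max(h, w)
    new_h, new_w = round(h * scale), round(w * scale)
    img = F.interpolate(img.unsqueeze(0), size=(new_h, new_w),
                        mode="bilinear", align_corners=False).squeeze(0)
    # Pad on the right/bottom; the real implementation may center instead.
    return F.pad(img, (0, size - new_w, 0, size - new_h), value=0.0)

frame = torch.rand(3, 480, 640)           # camera shape from config.json
print(resize_with_padding(frame).shape)   # torch.Size([3, 512, 512])
```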
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c014557d01eaf2ecc9689c804d30e99e34f0f42380fe725212e2f8530678f73e
+ size 906713296
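
`model.safetensors` is stored under Git LFS, so the repository keeps only this pointer: the blob's SHA-256 and its size (906,713,296 bytes, consistent with a model on the order of 450M parameters at 2 bytes each). A quick local integrity check against the pointer, assuming the actual weights file has already been downloaded to the working directory:

```python
import hashlib
from pathlib import Path

# Values copied from the LFS pointer above.
EXPECTED_OID = "c014557d01eaf2ecc9689c804d30e99e34f0f42380fe725212e2f8530678f73e"
EXPECTED_SIZE = 906_713_296

path = Path("model.safetensors")  # assumed local download location
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "hash mismatch"
print("model.safetensors matches the LFS pointer")
```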
train_config.json ADDED
@@ -0,0 +1,206 @@
+ {
+   "dataset": {
+     "repo_id": [
+       "shuohsuan/grasp0",
+       "shuohsuan/grasp1",
+       "shuohsuan/grasp2",
+       "shuohsuan/grasp3"
+     ],
+     "repo_idx": 4,
+     "root": null,
+     "episodes": null,
+     "image_transforms": {
+       "enable": false,
+       "max_num_transforms": 3,
+       "random_order": false,
+       "tfs": {
+         "brightness": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "brightness": [
+               0.8,
+               1.2
+             ]
+           }
+         },
+         "contrast": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "contrast": [
+               0.8,
+               1.2
+             ]
+           }
+         },
+         "saturation": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "saturation": [
+               0.5,
+               1.5
+             ]
+           }
+         },
+         "hue": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "hue": [
+               -0.05,
+               0.05
+             ]
+           }
+         },
+         "sharpness": {
+           "weight": 1.0,
+           "type": "SharpnessJitter",
+           "kwargs": {
+             "sharpness": [
+               0.5,
+               1.5
+             ]
+           }
+         }
+       }
+     },
+     "revision": null,
+     "use_imagenet_stats": true,
+     "video_backend": "torchcodec"
+   },
+   "env": null,
+   "policy": {
+     "type": "smolvla",
+     "n_obs_steps": 1,
+     "normalization_mapping": {
+       "VISUAL": "IDENTITY",
+       "STATE": "MEAN_STD",
+       "ACTION": "MEAN_STD"
+     },
+     "input_features": {
+       "observation.state": {
+         "type": "STATE",
+         "shape": [
+           6
+         ]
+       },
+       "observation.images.side": {
+         "type": "VISUAL",
+         "shape": [
+           3,
+           480,
+           640
+         ]
+       },
+       "observation.images.wrist": {
+         "type": "VISUAL",
+         "shape": [
+           3,
+           480,
+           640
+         ]
+       }
+     },
+     "output_features": {
+       "action": {
+         "type": "ACTION",
+         "shape": [
+           6
+         ]
+       }
+     },
+     "device": "cuda",
+     "use_amp": false,
+     "push_to_hub": true,
+     "repo_id": "shuohsuan/svla_grasp0123",
+     "private": null,
+     "tags": null,
+     "license": null,
+     "chunk_size": 50,
+     "n_action_steps": 50,
+     "max_state_dim": 32,
+     "max_action_dim": 32,
+     "resize_imgs_with_padding": [
+       512,
+       512
+     ],
+     "empty_cameras": 0,
+     "adapt_to_pi_aloha": false,
+     "use_delta_joint_actions_aloha": false,
+     "tokenizer_max_length": 48,
+     "num_steps": 10,
+     "use_cache": true,
+     "freeze_vision_encoder": true,
+     "train_expert_only": true,
+     "train_state_proj": true,
+     "optimizer_lr": 0.0001,
+     "optimizer_betas": [
+       0.9,
+       0.95
+     ],
+     "optimizer_eps": 1e-08,
+     "optimizer_weight_decay": 1e-10,
+     "optimizer_grad_clip_norm": 10.0,
+     "scheduler_warmup_steps": 1000,
+     "scheduler_decay_steps": 30000,
+     "scheduler_decay_lr": 2.5e-06,
+     "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
+     "load_vlm_weights": true,
+     "add_image_special_tokens": false,
+     "attention_mode": "cross_attn",
+     "prefix_length": 0,
+     "pad_language_to": "max_length",
+     "num_expert_layers": 0,
+     "num_vlm_layers": 16,
+     "self_attn_every_n_layers": 2,
+     "expert_width_multiplier": 0.75,
+     "min_period": 0.004,
+     "max_period": 4.0
+   },
+   "output_dir": "/mnt/storage/shuohsuan/train/svla_grasp0123",
+   "job_name": "svla_grasp_pos",
+   "resume": false,
+   "seed": 1000,
+   "num_workers": 4,
+   "batch_size": 64,
+   "steps": 20000,
+   "eval_freq": 20000,
+   "log_freq": 200,
+   "save_checkpoint": true,
+   "save_freq": 20000,
+   "use_policy_training_preset": true,
+   "optimizer": {
+     "type": "adamw",
+     "lr": 0.0001,
+     "weight_decay": 1e-10,
+     "grad_clip_norm": 10.0,
+     "betas": [
+       0.9,
+       0.95
+     ],
+     "eps": 1e-08
+   },
+   "scheduler": {
+     "type": "cosine_decay_with_warmup",
+     "num_warmup_steps": 1000,
+     "num_decay_steps": 30000,
+     "peak_lr": 0.0001,
+     "decay_lr": 2.5e-06
+   },
+   "eval": {
+     "n_episodes": 50,
+     "batch_size": 50,
+     "use_async_envs": false
+   },
+   "wandb": {
+     "enable": false,
+     "disable_artifact": false,
+     "project": "lerobot",
+     "entity": null,
+     "notes": null,
+     "run_id": null,
+     "mode": null
+   }
+ }
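
The `scheduler` block fixes the learning-rate trajectory: 1,000 warmup steps up to `peak_lr` = 1e-4, then cosine decay toward `decay_lr` = 2.5e-6 by step 30,000; since `steps` = 20,000, the run stops partway down the decay. A sketch under the usual linear-warmup-plus-cosine formulation (lerobot's exact horizon and boundary handling may differ):

```python
import math

# Values from the "scheduler" block above; whether warmup counts toward the
# 30,000-step decay horizon is an assumption in this sketch.
PEAK_LR, DECAY_LR = 1e-4, 2.5e-6
WARMUP_STEPS, DECAY_STEPS = 1_000, 30_000

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        # Linear warmup from 0 to the peak learning rate.
        return PEAK_LR * step / WARMUP_STEPS
    # Cosine decay from PEAK_LR down to DECAY_LR, flattening at DECAY_STEPS.
    progress = min((step - WARMUP_STEPS) / (DECAY_STEPS - WARMUP_STEPS), 1.0)
    return DECAY_LR + 0.5 * (PEAK_LR - DECAY_LR) * (1.0 + math.cos(math.pi * progress))

# The run trains for 20,000 steps, so it ends mid-decay:
for step in (0, 1_000, 10_000, 20_000, 30_000):
    print(f"step {step:>6}: lr = {lr_at(step):.2e}")
```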