Orellius committed on
Commit
646199f
·
verified ·
1 Parent(s): 211936f

Upload policy weights, train config and readme

Browse files
Files changed (4) hide show
  1. README.md +63 -0
  2. config.json +4 -4
  3. model.safetensors +1 -1
  4. train_config.json +10 -10
README.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: lerobot/smolvla_base
3
+ datasets: hubnemo/so101_sort
4
+ library_name: lerobot
5
+ license: apache-2.0
6
+ model_name: smolvla
7
+ pipeline_tag: robotics
8
+ tags:
9
+ - lerobot
10
+ - robotics
11
+ - smolvla
12
+ ---
13
+
14
+ # Model Card for smolvla
15
+
16
+ <!-- Provide a quick summary of what the model is/does. -->
17
+
18
+
19
+ [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware.
20
+
21
+
22
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
23
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
24
+
25
+ ---
26
+
27
+ ## How to Get Started with the Model
28
+
29
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
30
+ Below is the short version of how to train and run inference/eval:
31
+
32
+ ### Train from scratch
33
+
34
+ ```bash
35
+ lerobot-train \
36
+ --dataset.repo_id=${HF_USER}/<dataset> \
37
+ --policy.type=smolvla \
38
+ --output_dir=outputs/train/<desired_policy_repo_id> \
39
+ --job_name=lerobot_training \
40
+ --policy.device=cuda \
41
+ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
42
+ --wandb.enable=true
43
+ ```
44
+
45
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
46
+
47
+ ### Evaluate the policy/run inference
48
+
49
+ ```bash
50
+ lerobot-record \
51
+ --robot.type=so100_follower \
52
+ --dataset.repo_id=<hf_user>/eval_<dataset> \
53
+ --policy.path=<hf_user>/<desired_policy_repo_id> \
54
+ --episodes=10
55
+ ```
56
+
57
+ Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint.
58
+
59
+ ---
60
+
61
+ ## Model Details
62
+
63
+ - **License:** apache-2.0
config.json CHANGED
@@ -28,8 +28,8 @@
28
  "device": "cuda",
29
  "use_amp": false,
30
  "use_peft": false,
31
- "push_to_hub": false,
32
- "repo_id": null,
33
  "private": null,
34
  "tags": null,
35
  "license": null,
@@ -64,8 +64,8 @@
64
  "optimizer_eps": 1e-08,
65
  "optimizer_weight_decay": 1e-10,
66
  "optimizer_grad_clip_norm": 10,
67
- "scheduler_warmup_steps": 50,
68
- "scheduler_decay_steps": 1000,
69
  "scheduler_decay_lr": 2.5e-06,
70
  "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
71
  "load_vlm_weights": false,
 
28
  "device": "cuda",
29
  "use_amp": false,
30
  "use_peft": false,
31
+ "push_to_hub": true,
32
+ "repo_id": "orellius/so101_sort_smolvla",
33
  "private": null,
34
  "tags": null,
35
  "license": null,
 
64
  "optimizer_eps": 1e-08,
65
  "optimizer_weight_decay": 1e-10,
66
  "optimizer_grad_clip_norm": 10,
67
+ "scheduler_warmup_steps": 250,
68
+ "scheduler_decay_steps": 5000,
69
  "scheduler_decay_lr": 2.5e-06,
70
  "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
71
  "load_vlm_weights": false,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1ee376e8da064e11186d3977cd1477b49cb1e43a5b8fdadaa89bc93fd97bc277
3
  size 1197789224
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55bdd30528904dfb97a97a40f84f26e322a35fa7d8d6533594e0c90d1da0b592
3
  size 1197789224
train_config.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "dataset": {
3
  "repo_id": "hubnemo/so101_sort",
4
- "root": "datasets/so101_matchbox_reward_fpv_less_bias",
5
  "episodes": null,
6
  "image_transforms": {
7
  "enable": false,
@@ -96,8 +96,8 @@
96
  "device": "cuda",
97
  "use_amp": false,
98
  "use_peft": false,
99
- "push_to_hub": false,
100
- "repo_id": null,
101
  "private": null,
102
  "tags": null,
103
  "license": null,
@@ -132,8 +132,8 @@
132
  "optimizer_eps": 1e-08,
133
  "optimizer_weight_decay": 1e-10,
134
  "optimizer_grad_clip_norm": 10,
135
- "scheduler_warmup_steps": 50,
136
- "scheduler_decay_steps": 1000,
137
  "scheduler_decay_lr": 2.5e-06,
138
  "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
139
  "load_vlm_weights": false,
@@ -148,13 +148,13 @@
148
  "min_period": 0.004,
149
  "max_period": 4.0
150
  },
151
- "output_dir": "outputs/train/so101_sort_so101_sort_smolvla_20251107_2009",
152
- "job_name": "so101_sort_so101_sort_smolvla_20251107_2009",
153
  "resume": false,
154
  "seed": 1000,
155
  "num_workers": 4,
156
  "batch_size": 4,
157
- "steps": 1000,
158
  "eval_freq": 200,
159
  "log_freq": 100,
160
  "save_checkpoint": true,
@@ -173,8 +173,8 @@
173
  },
174
  "scheduler": {
175
  "type": "cosine_decay_with_warmup",
176
- "num_warmup_steps": 50,
177
- "num_decay_steps": 1000,
178
  "peak_lr": 0.0003,
179
  "decay_lr": 2.5e-06
180
  },
 
1
  {
2
  "dataset": {
3
  "repo_id": "hubnemo/so101_sort",
4
+ "root": "datasets/so101_sort",
5
  "episodes": null,
6
  "image_transforms": {
7
  "enable": false,
 
96
  "device": "cuda",
97
  "use_amp": false,
98
  "use_peft": false,
99
+ "push_to_hub": true,
100
+ "repo_id": "orellius/so101_sort_smolvla",
101
  "private": null,
102
  "tags": null,
103
  "license": null,
 
132
  "optimizer_eps": 1e-08,
133
  "optimizer_weight_decay": 1e-10,
134
  "optimizer_grad_clip_norm": 10,
135
+ "scheduler_warmup_steps": 250,
136
+ "scheduler_decay_steps": 5000,
137
  "scheduler_decay_lr": 2.5e-06,
138
  "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
139
  "load_vlm_weights": false,
 
148
  "min_period": 0.004,
149
  "max_period": 4.0
150
  },
151
+ "output_dir": "outputs/train/so101_sort_so101_sort_smolvla_20251107_2019",
152
+ "job_name": "so101_sort_so101_sort_smolvla_20251107_2019",
153
  "resume": false,
154
  "seed": 1000,
155
  "num_workers": 4,
156
  "batch_size": 4,
157
+ "steps": 5000,
158
  "eval_freq": 200,
159
  "log_freq": 100,
160
  "save_checkpoint": true,
 
173
  },
174
  "scheduler": {
175
  "type": "cosine_decay_with_warmup",
176
+ "num_warmup_steps": 250,
177
+ "num_decay_steps": 5000,
178
  "peak_lr": 0.0003,
179
  "decay_lr": 2.5e-06
180
  },