mhered committed on
Commit
ec8130b
·
verified ·
1 Parent(s): 1c5668c

Upload last checkpoint

Browse files
Files changed (3) hide show
  1. config.json +2 -2
  2. model.safetensors +1 -1
  3. train_config.json +6 -6
config.json CHANGED
@@ -40,7 +40,7 @@
40
  "private": null,
41
  "tags": null,
42
  "license": null,
43
- "pretrained_path": null,
44
  "chunk_size": 50,
45
  "n_action_steps": 50,
46
  "normalization_mapping": {
@@ -70,7 +70,7 @@
70
  ],
71
  "optimizer_eps": 1e-08,
72
  "optimizer_weight_decay": 1e-10,
73
- "optimizer_grad_clip_norm": 10,
74
  "scheduler_warmup_steps": 1000,
75
  "scheduler_decay_steps": 30000,
76
  "scheduler_decay_lr": 2.5e-06,
 
40
  "private": null,
41
  "tags": null,
42
  "license": null,
43
+ "pretrained_path": "outputs/train/smolvla_so100_pato_test1/checkpoints/last/pretrained_model",
44
  "chunk_size": 50,
45
  "n_action_steps": 50,
46
  "normalization_mapping": {
 
70
  ],
71
  "optimizer_eps": 1e-08,
72
  "optimizer_weight_decay": 1e-10,
73
+ "optimizer_grad_clip_norm": 10.0,
74
  "scheduler_warmup_steps": 1000,
75
  "scheduler_decay_steps": 30000,
76
  "scheduler_decay_lr": 2.5e-06,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d5eebfb267bbdb111fdd0e517d7997d89dc16219b8c3f3c30403bc95eec7ecb5
3
  size 1197789224
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c44f802d941eb2e8c8f6f0834843ea4f747de4c0ddf7efaf1506175fb60a2b7
3
  size 1197789224
train_config.json CHANGED
@@ -4,7 +4,7 @@
4
  "root": null,
5
  "episodes": null,
6
  "image_transforms": {
7
- "enable": false,
8
  "max_num_transforms": 3,
9
  "random_order": false,
10
  "tfs": {
@@ -108,7 +108,7 @@
108
  "private": null,
109
  "tags": null,
110
  "license": null,
111
- "pretrained_path": null,
112
  "chunk_size": 50,
113
  "n_action_steps": 50,
114
  "normalization_mapping": {
@@ -138,7 +138,7 @@
138
  ],
139
  "optimizer_eps": 1e-08,
140
  "optimizer_weight_decay": 1e-10,
141
- "optimizer_grad_clip_norm": 10,
142
  "scheduler_warmup_steps": 1000,
143
  "scheduler_decay_steps": 30000,
144
  "scheduler_decay_lr": 2.5e-06,
@@ -157,11 +157,11 @@
157
  },
158
  "output_dir": "outputs/train/smolvla_so100_pato_test1",
159
  "job_name": "smolvla_so100_pato_test",
160
- "resume": false,
161
  "seed": 1000,
162
  "num_workers": 4,
163
  "batch_size": 64,
164
- "steps": 4000,
165
  "eval_freq": 2000,
166
  "log_freq": 200,
167
  "save_checkpoint": true,
@@ -171,7 +171,7 @@
171
  "type": "adamw",
172
  "lr": 0.0001,
173
  "weight_decay": 1e-10,
174
- "grad_clip_norm": 10,
175
  "betas": [
176
  0.9,
177
  0.95
 
4
  "root": null,
5
  "episodes": null,
6
  "image_transforms": {
7
+ "enable": true,
8
  "max_num_transforms": 3,
9
  "random_order": false,
10
  "tfs": {
 
108
  "private": null,
109
  "tags": null,
110
  "license": null,
111
+ "pretrained_path": "outputs/train/smolvla_so100_pato_test1/checkpoints/last/pretrained_model",
112
  "chunk_size": 50,
113
  "n_action_steps": 50,
114
  "normalization_mapping": {
 
138
  ],
139
  "optimizer_eps": 1e-08,
140
  "optimizer_weight_decay": 1e-10,
141
+ "optimizer_grad_clip_norm": 10.0,
142
  "scheduler_warmup_steps": 1000,
143
  "scheduler_decay_steps": 30000,
144
  "scheduler_decay_lr": 2.5e-06,
 
157
  },
158
  "output_dir": "outputs/train/smolvla_so100_pato_test1",
159
  "job_name": "smolvla_so100_pato_test",
160
+ "resume": true,
161
  "seed": 1000,
162
  "num_workers": 4,
163
  "batch_size": 64,
164
+ "steps": 6000,
165
  "eval_freq": 2000,
166
  "log_freq": 200,
167
  "save_checkpoint": true,
 
171
  "type": "adamw",
172
  "lr": 0.0001,
173
  "weight_decay": 1e-10,
174
+ "grad_clip_norm": 10.0,
175
  "betas": [
176
  0.9,
177
  0.95