Upload train_config.json with huggingface_hub
Browse files- train_config.json +35 -0
train_config.json
ADDED
@@ -0,0 +1,35 @@
{
  "model_name": "HuggingFaceTB/SmolVLA-base",
  "lora_rank": 64,
  "lora_alpha": 128,
  "lora_target_modules": [
    "q_proj",
    "v_proj"
  ],
  "lora_dropout": 0.05,
  "learning_rate": 0.0002,
  "weight_decay": 0.01,
  "batch_size": 2,
  "num_epochs": 1,
  "max_steps": 20,
  "gradient_accumulation_steps": 8,
  "warmup_ratio": 0.05,
  "max_grad_norm": 1.0,
  "eval_steps": 250,
  "eval_episodes": 20,
  "save_steps": 500,
  "save_total_limit": 3,
  "device": "mps",
  "fp16": false,
  "bf16": false,
  "dataloader_num_workers": 0,
  "gradient_checkpointing": true,
  "output_dir": "checkpoints/v0/",
  "logging_steps": 10,
  "report_to": "none",
  "seed": 42,
  "dataset_path": "data/lerobot/dataset",
  "train_split": 0.9,
  "action_chunk_size": 50,
  "control_frequency_hz": 10.0
}