{"batch_size": 64,
 "dataset": {"episodes": null,
             "image_transforms": {"enable": false,
                                  "max_num_transforms": 3,
                                  "random_order": false,
                                  "tfs": {"brightness": {"kwargs": {"brightness": [0.8,
                                                                                   1.2]},
                                                         "type": "ColorJitter",
                                                         "weight": 1.0},
                                          "contrast": {"kwargs": {"contrast": [0.8,
                                                                               1.2]},
                                                       "type": "ColorJitter",
                                                       "weight": 1.0},
                                          "hue": {"kwargs": {"hue": [-0.05,
                                                                     0.05]},
                                                  "type": "ColorJitter",
                                                  "weight": 1.0},
                                          "saturation": {"kwargs": {"saturation": [0.5,
                                                                                   1.5]},
                                                         "type": "ColorJitter",
                                                         "weight": 1.0},
                                          "sharpness": {"kwargs": {"sharpness": [0.5,
                                                                                 1.5]},
                                                        "type": "SharpnessJitter",
                                                        "weight": 1.0}}},
             "repo_id": "florian-moyen/strawberries_merged",
             "revision": null,
             "root": null,
             "use_imagenet_stats": true,
             "video_backend": "torchcodec"},
 "env": null,
 "eval": {"batch_size": 50, "n_episodes": 50, "use_async_envs": false},
 "eval_freq": 20000,
 "job_name": "strawberries_job",
 "log_freq": 200,
 "num_workers": 4,
 "optimizer": {"betas": [0.9, 0.95],
               "eps": 1e-08,
               "grad_clip_norm": 10.0,
               "lr": 0.0001,
               "type": "adamw",
               "weight_decay": 1e-10},
 "output_dir": "outputs/train/strawberries_model",
 "policy": {"adapt_to_pi_aloha": false,
            "add_image_special_tokens": false,
            "attention_mode": "cross_attn",
            "chunk_size": 50,
            "device": "cuda",
            "empty_cameras": 0,
            "expert_width_multiplier": 0.75,
            "freeze_vision_encoder": true,
            "input_features": {"observation.image": {"shape": [3, 256, 256],
                                                     "type": "VISUAL"},
                               "observation.image2": {"shape": [3, 256, 256],
                                                      "type": "VISUAL"},
                               "observation.image3": {"shape": [3, 256, 256],
                                                      "type": "VISUAL"},
                               "observation.state": {"shape": [6],
                                                     "type": "STATE"}},
            "load_vlm_weights": true,
            "max_action_dim": 32,
            "max_period": 4.0,
            "max_state_dim": 32,
            "min_period": 0.004,
            "n_action_steps": 50,
            "n_obs_steps": 1,
            "normalization_mapping": {"ACTION": "MEAN_STD",
                                      "STATE": "MEAN_STD",
                                      "VISUAL": "IDENTITY"},
            "num_expert_layers": 0,
            "num_steps": 10,
            "num_vlm_layers": 16,
            "optimizer_betas": [0.9, 0.95],
            "optimizer_eps": 1e-08,
            "optimizer_grad_clip_norm": 10.0,
            "optimizer_lr": 0.0001,
            "optimizer_weight_decay": 1e-10,
            "output_features": {"action": {"shape": [6],
                                           "type": "ACTION"}},
            "pad_language_to": "max_length",
            "prefix_length": 0,
            "resize_imgs_with_padding": [512, 512],
            "scheduler_decay_lr": 2.5e-06,
            "scheduler_decay_steps": 30000,
            "scheduler_warmup_steps": 1000,
            "self_attn_every_n_layers": 2,
            "tokenizer_max_length": 48,
            "train_expert_only": true,
            "train_state_proj": true,
            "type": "smolvla",
            "use_amp": false,
            "use_cache": true,
            "use_delta_joint_actions_aloha": false,
            "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct"},
 "resume": false,
 "save_checkpoint": true,
 "save_freq": 20000,
 "scheduler": {"decay_lr": 2.5e-06,
               "num_decay_steps": 30000,
               "num_warmup_steps": 1000,
               "peak_lr": 0.0001,
               "type": "cosine_decay_with_warmup"},
 "seed": 1000,
 "steps": 20000,
 "use_policy_training_preset": true,
 "wandb": {"disable_artifact": false,
           "enable": true,
           "entity": null,
           "mode": null,
           "notes": null,
           "project": "lerobot",
           "run_id": null}}