steb6 committed · Commit c03e423 · verified · 1 parent: 76c8f9b

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +62 -0
  2. config.json +71 -0
  3. model.safetensors +3 -0
  4. train_config.json +199 -0
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ datasets: steb6/HOME
+ library_name: lerobot
+ license: apache-2.0
+ model_name: act
+ pipeline_tag: robotics
+ tags:
+ - lerobot
+ - robotics
+ - act
+ ---
+
+ # Model Card for act
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.
+
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+ ---
+
+ ## How to Get Started with the Model
+
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+ Below is a short version of how to train and run inference/evaluation:
+
+ ### Train from scratch
+
+ ```bash
+ lerobot-train \
+   --dataset.repo_id=${HF_USER}/<dataset> \
+   --policy.type=act \
+   --output_dir=outputs/train/<desired_policy_repo_id> \
+   --job_name=lerobot_training \
+   --policy.device=cuda \
+   --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
+   --wandb.enable=true
+ ```
+
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
+
+ ### Evaluate the policy/run inference
+
+ ```bash
+ lerobot-record \
+   --robot.type=so100_follower \
+   --dataset.repo_id=<hf_user>/eval_<dataset> \
+   --policy.path=<hf_user>/<desired_policy_repo_id> \
+   --episodes=10
+ ```
+
+ Prefix the dataset repo with **eval\_** and point `--policy.path` at a local or Hub checkpoint.
+
+ ---
+
+ ## Model Details
+
+ - **License:** apache-2.0
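For reference, the snippet below sketches how this checkpoint could be loaded directly in Python and queried for an action, based on the input/output features declared in `config.json`. It is an illustrative sketch, not an official example from this repository, and the LeRobot import path varies between library versions.

```python
# Minimal inference sketch (assumption: recent LeRobot; older releases expose the
# policy under `lerobot.common.policies.act.modeling_act` instead).
import torch
from lerobot.policies.act.modeling_act import ACTPolicy

policy = ACTPolicy.from_pretrained("steb6/act-HOME")
policy.eval()
policy.reset()  # clears the internal action queue before a new episode
device = next(policy.parameters()).device

# Dummy observation matching the shapes declared in config.json:
# a 36-dim state vector and one 3x480x640 egocentric RGB frame in [0, 1].
obs = {
    "observation.state": torch.zeros(1, 36, device=device),
    "observation.images.egocentric": torch.zeros(1, 3, 480, 640, device=device),
}

with torch.no_grad():
    action = policy.select_action(obs)

print(action.shape)  # expected: torch.Size([1, 36])
```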
config.json ADDED
@@ -0,0 +1,71 @@
+ {
+     "type": "act",
+     "n_obs_steps": 1,
+     "input_features": {
+         "observation.state": {
+             "type": "STATE",
+             "shape": [
+                 36
+             ]
+         },
+         "observation.images.egocentric": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         }
+     },
+     "output_features": {
+         "action": {
+             "type": "ACTION",
+             "shape": [
+                 36
+             ]
+         }
+     },
+     "device": "cuda",
+     "use_amp": false,
+     "use_peft": false,
+     "push_to_hub": true,
+     "repo_id": "steb6/act-HOME",
+     "private": null,
+     "tags": null,
+     "license": null,
+     "pretrained_path": null,
+     "chunk_size": 20,
+     "n_action_steps": 20,
+     "normalization_mapping": {
+         "VISUAL": "MEAN_STD",
+         "STATE": "MEAN_STD",
+         "ACTION": "MEAN_STD"
+     },
+     "vision_backbone": "resnet18",
+     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+     "replace_final_stride_with_dilation": false,
+     "pre_norm": false,
+     "dim_model": 512,
+     "n_heads": 8,
+     "dim_feedforward": 3200,
+     "feedforward_activation": "relu",
+     "n_encoder_layers": 4,
+     "n_decoder_layers": 1,
+     "use_vae": true,
+     "latent_dim": 32,
+     "n_vae_encoder_layers": 4,
+     "temporal_ensemble_coeff": null,
+     "dropout": 0.1,
+     "kl_weight": 10.0,
+     "use_language_conditioning": false,
+     "language_encoder_type": "clip",
+     "language_model_name": "openai/clip-vit-base-patch32",
+     "freeze_language_encoder": true,
+     "language_projection_dim": null,
+     "language_dropout": 0.1,
+     "max_token_length": 77,
+     "language_pooling": "cls",
+     "optimizer_lr": 1e-05,
+     "optimizer_weight_decay": 0.0001,
+     "optimizer_lr_backbone": 1e-05
+ }
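A note on the action-chunking fields above: with `chunk_size` and `n_action_steps` both set to 20 and `temporal_ensemble_coeff` left null, the policy predicts 20 actions per forward pass and executes all of them before re-planning. The sketch below illustrates that queueing behaviour with hypothetical helper names; the real logic lives inside LeRobot's `ACTPolicy.select_action`.

```python
# Illustrative sketch of what chunk_size / n_action_steps imply at control time.
# `policy_forward` stands in for one forward pass returning a (CHUNK_SIZE, action_dim) chunk.
from collections import deque

CHUNK_SIZE = 20       # actions predicted per forward pass
N_ACTION_STEPS = 20   # actions executed from each chunk before re-planning

action_queue = deque(maxlen=N_ACTION_STEPS)

def next_action(policy_forward, observation):
    """Pop the next action; refill the queue with a fresh chunk when it runs dry."""
    if not action_queue:
        chunk = policy_forward(observation)          # shape: (CHUNK_SIZE, action_dim)
        action_queue.extend(chunk[:N_ACTION_STEPS])  # queue only the steps to execute
    return action_queue.popleft()
```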
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2449b6012ab3e683083113124aaa1601f1437830c6587f736857cd5a61e2b38
+ size 206617960
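The file above is a Git LFS pointer; the actual ~207 MB checkpoint lives on the Hub. A small sketch (assuming `huggingface_hub` and `safetensors` are installed) for fetching the weights and inspecting the stored tensors:

```python
# Download the weight file from the Hub and list a few parameter names/shapes.
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download(repo_id="steb6/act-HOME", filename="model.safetensors")

with safe_open(path, framework="pt") as f:
    for name in list(f.keys())[:5]:  # first few entries only
        print(name, f.get_slice(name).get_shape())
```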
train_config.json ADDED
@@ -0,0 +1,199 @@
+ {
+     "dataset": {
+         "repo_id": "steb6/HOME",
+         "root": null,
+         "episodes": null,
+         "image_transforms": {
+             "enable": true,
+             "max_num_transforms": 3,
+             "random_order": true,
+             "tfs": {
+                 "brightness": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "brightness": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "contrast": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "contrast": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "saturation": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "saturation": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 },
+                 "hue": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "hue": [
+                             -0.05,
+                             0.05
+                         ]
+                     }
+                 },
+                 "sharpness": {
+                     "weight": 1.0,
+                     "type": "SharpnessJitter",
+                     "kwargs": {
+                         "sharpness": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 }
+             }
+         },
+         "revision": null,
+         "use_imagenet_stats": true,
+         "video_backend": "torchcodec",
+         "streaming": false
+     },
+     "env": null,
+     "policy": {
+         "type": "act",
+         "n_obs_steps": 1,
+         "input_features": {
+             "observation.state": {
+                 "type": "STATE",
+                 "shape": [
+                     36
+                 ]
+             },
+             "observation.images.egocentric": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             }
+         },
+         "output_features": {
+             "action": {
+                 "type": "ACTION",
+                 "shape": [
+                     36
+                 ]
+             }
+         },
+         "device": "cuda",
+         "use_amp": false,
+         "use_peft": false,
+         "push_to_hub": true,
+         "repo_id": "steb6/act-HOME",
+         "private": null,
+         "tags": null,
+         "license": null,
+         "pretrained_path": null,
+         "chunk_size": 20,
+         "n_action_steps": 20,
+         "normalization_mapping": {
+             "VISUAL": "MEAN_STD",
+             "STATE": "MEAN_STD",
+             "ACTION": "MEAN_STD"
+         },
+         "vision_backbone": "resnet18",
+         "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+         "replace_final_stride_with_dilation": false,
+         "pre_norm": false,
+         "dim_model": 512,
+         "n_heads": 8,
+         "dim_feedforward": 3200,
+         "feedforward_activation": "relu",
+         "n_encoder_layers": 4,
+         "n_decoder_layers": 1,
+         "use_vae": true,
+         "latent_dim": 32,
+         "n_vae_encoder_layers": 4,
+         "temporal_ensemble_coeff": null,
+         "dropout": 0.1,
+         "kl_weight": 10.0,
+         "use_language_conditioning": false,
+         "language_encoder_type": "clip",
+         "language_model_name": "openai/clip-vit-base-patch32",
+         "freeze_language_encoder": true,
+         "language_projection_dim": null,
+         "language_dropout": 0.1,
+         "max_token_length": 77,
+         "language_pooling": "cls",
+         "optimizer_lr": 1e-05,
+         "optimizer_weight_decay": 0.0001,
+         "optimizer_lr_backbone": 1e-05
+     },
+     "output_dir": "checkpoints/act_HOME",
+     "job_name": "act",
+     "resume": false,
+     "seed": 1000,
+     "num_workers": 4,
+     "batch_size": 8,
+     "steps": 100000,
+     "eval_freq": 20000,
+     "log_freq": 200,
+     "tolerance_s": 0.0001,
+     "save_checkpoint": true,
+     "save_freq": 20000,
+     "use_policy_training_preset": true,
+     "optimizer": {
+         "type": "adamw",
+         "lr": 1e-05,
+         "weight_decay": 0.0001,
+         "grad_clip_norm": 10.0,
+         "betas": [
+             0.9,
+             0.999
+         ],
+         "eps": 1e-08
+     },
+     "scheduler": null,
+     "eval": {
+         "n_episodes": 50,
+         "batch_size": 50,
+         "use_async_envs": false
+     },
+     "wandb": {
+         "enable": false,
+         "disable_artifact": false,
+         "project": "lerobot",
+         "entity": null,
+         "notes": null,
+         "run_id": null,
+         "mode": null
+     },
+     "peft": null,
+     "use_rabc": false,
+     "rabc_progress_path": null,
+     "rabc_kappa": 0.01,
+     "rabc_epsilon": 1e-06,
+     "rabc_head_mode": "sparse",
+     "rename_map": {},
+     "image_resize_size": [
+         256,
+         256
+     ],
+     "image_crop_params": {
+         "observation.images.egocentric": [
+             0,
+             80,
+             480,
+             480
+         ]
+     },
+     "checkpoint_path": null
+ }
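For readers reconstructing the training pipeline, this config implies the following image preprocessing: crop the egocentric frame to a 480x480 window (`image_crop_params`), resize it to 256x256 (`image_resize_size`), then apply up to three of the listed jitter transforms in random order. The sketch below approximates this with torchvision; `SharpnessJitter` is a LeRobot transform, stood in for here by `RandomAdjustSharpness`, and the crop is assumed to follow torchvision's (top, left, height, width) convention.

```python
# Approximate re-creation of the train-time image pipeline implied by train_config.json.
import random
import torch
from torchvision.transforms import v2
from torchvision.transforms.v2 import functional as F

jitters = [
    v2.ColorJitter(brightness=(0.8, 1.2)),
    v2.ColorJitter(contrast=(0.8, 1.2)),
    v2.ColorJitter(saturation=(0.5, 1.5)),
    v2.ColorJitter(hue=(-0.05, 0.05)),
    v2.RandomAdjustSharpness(sharpness_factor=1.5, p=1.0),  # stand-in for SharpnessJitter [0.5, 1.5]
]

def preprocess(frame: torch.Tensor) -> torch.Tensor:
    """frame: float tensor (3, 480, 640) in [0, 1] -> augmented (3, 256, 256) tensor."""
    frame = F.crop(frame, top=0, left=80, height=480, width=480)  # image_crop_params
    frame = F.resize(frame, [256, 256])                           # image_resize_size
    for tf in random.sample(jitters, k=random.randint(0, 3)):     # max_num_transforms=3, random_order
        frame = tf(frame)
    return frame
```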