ac-pate committed · verified
Commit 60e2b55 · 1 Parent(s): 573756a

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +68 -0
  2. config.json +77 -0
  3. model.safetensors +3 -0
  4. train_config.json +190 -0
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ datasets:
+ - Batonchegg/bimanual_blue_block_handover_1
+ - Batonchegg/bimanual_blue_block_handover_2
+ - Batonchegg/bimanual_blue_block_handover_3
+ - Batonchegg/bimanual_blue_block_handover_4
+ - Batonchegg/bimanual_blue_block_handover_5
+ - Batonchegg/bimanual_blue_block_handover_6
+ library_name: lerobot
+ license: apache-2.0
+ model_name: act
+ pipeline_tag: robotics
+ tags:
+ - robotics
+ - lerobot
+ - act
+ ---
+
+ # Model Card for act
+
+ [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.
+
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+ ---
+
+ ## How to Get Started with the Model
+
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+ Below is a short version of how to train and run inference/evaluation:
+
+ ### Train from scratch
+
+ ```bash
+ python -m lerobot.scripts.train \
+ --dataset.repo_id=${HF_USER}/<dataset> \
+ --policy.type=act \
+ --output_dir=outputs/train/<desired_policy_repo_id> \
+ --job_name=lerobot_training \
+ --policy.device=cuda \
+ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
+ --wandb.enable=true
+ ```
+
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
+
+ ### Evaluate the policy / run inference
+
+ ```bash
+ python -m lerobot.record \
+ --robot.type=so100_follower \
+ --dataset.repo_id=<hf_user>/eval_<dataset> \
+ --policy.path=<hf_user>/<desired_policy_repo_id> \
+ --episodes=10
+ ```
+
+ Prefix the dataset repo id with **eval\_** and point `--policy.path` at a local or Hub checkpoint.
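+
+ For scripted inference outside `lerobot.record`, a minimal sketch is shown below. It assumes a recent LeRobot release where `ACTPolicy` provides `from_pretrained`, `reset`, and `select_action` (the import path differs across releases); the dummy observation mirrors the `input_features` in `config.json`:
+
+ ```python
+ import torch
+ from lerobot.policies.act.modeling_act import ACTPolicy  # older releases: lerobot.common.policies.act.modeling_act
+
+ # Load this checkpoint from the Hub (repo id as recorded in config.json).
+ policy = ACTPolicy.from_pretrained("Mimic-Robotics/act_nyquist_bimanual_handover")
+ policy.eval()
+ policy.reset()  # clear the internal action queue at episode start
+
+ # Dummy observation: a 12-dim state and three 3x480x640 RGB cameras, batch size 1.
+ # Move the policy and tensors to the same device before running on a robot.
+ obs = {
+     "observation.state": torch.zeros(1, 12),
+     "observation.images.wrist_right": torch.zeros(1, 3, 480, 640),
+     "observation.images.wrist_left": torch.zeros(1, 3, 480, 640),
+     "observation.images.realsense_top": torch.zeros(1, 3, 480, 640),
+ }
+
+ with torch.no_grad():
+     action = policy.select_action(obs)  # one 12-dim action; chunking is handled internally
+ ```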
+
+ ---
+
+ ## Model Details
+
+ - **License:** apache-2.0
config.json ADDED
@@ -0,0 +1,77 @@
+ {
+     "type": "act",
+     "n_obs_steps": 1,
+     "normalization_mapping": {
+         "VISUAL": "MEAN_STD",
+         "STATE": "MEAN_STD",
+         "ACTION": "MEAN_STD"
+     },
+     "input_features": {
+         "observation.state": {
+             "type": "STATE",
+             "shape": [
+                 12
+             ]
+         },
+         "observation.images.wrist_right": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         },
+         "observation.images.wrist_left": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         },
+         "observation.images.realsense_top": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         }
+     },
+     "output_features": {
+         "action": {
+             "type": "ACTION",
+             "shape": [
+                 12
+             ]
+         }
+     },
+     "device": "cuda",
+     "use_amp": false,
+     "push_to_hub": true,
+     "repo_id": "Mimic-Robotics/act_nyquist_bimanual_handover",
+     "private": null,
+     "tags": null,
+     "license": null,
+     "chunk_size": 100,
+     "n_action_steps": 100,
+     "vision_backbone": "resnet18",
+     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+     "replace_final_stride_with_dilation": false,
+     "pre_norm": false,
+     "dim_model": 512,
+     "n_heads": 8,
+     "dim_feedforward": 3200,
+     "feedforward_activation": "relu",
+     "n_encoder_layers": 4,
+     "n_decoder_layers": 1,
+     "use_vae": true,
+     "latent_dim": 32,
+     "n_vae_encoder_layers": 4,
+     "temporal_ensemble_coeff": null,
+     "dropout": 0.1,
+     "kl_weight": 10.0,
+     "optimizer_lr": 1e-05,
+     "optimizer_weight_decay": 0.0001,
+     "optimizer_lr_backbone": 1e-05
+ }
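
Note on the chunking fields above: `chunk_size` and `n_action_steps` are both 100 and `temporal_ensemble_coeff` is null, so at inference time the policy predicts a 100-step action chunk and plays it out fully before the model is queried again. The sketch below is a toy illustration of that execution pattern, not LeRobot's actual implementation; `policy_forward` is a hypothetical stand-in for the model call.

```python
from collections import deque

import torch

CHUNK_SIZE = 100  # chunk_size == n_action_steps in this config

action_queue: deque = deque(maxlen=CHUNK_SIZE)

def select_action(policy_forward, obs: dict) -> torch.Tensor:
    """Return one action per call, re-querying the model every CHUNK_SIZE steps."""
    if not action_queue:
        # Hypothetical model call producing a chunk of shape (1, 100, 12).
        chunk = policy_forward(obs)
        action_queue.extend(chunk.squeeze(0))  # enqueue 100 actions of dim 12
    return action_queue.popleft()
```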
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc0aa782922570ed97581038bb2340b3d892a14bc7faba1f83ea97d6b86862f9
+ size 206750712
train_config.json ADDED
@@ -0,0 +1,190 @@
+ {
+     "dataset": {
+         "repo_id": [
+             "Batonchegg/bimanual_blue_block_handover_1",
+             "Batonchegg/bimanual_blue_block_handover_2",
+             "Batonchegg/bimanual_blue_block_handover_3",
+             "Batonchegg/bimanual_blue_block_handover_4",
+             "Batonchegg/bimanual_blue_block_handover_5",
+             "Batonchegg/bimanual_blue_block_handover_6"
+         ],
+         "root": null,
+         "episodes": null,
+         "image_transforms": {
+             "enable": false,
+             "max_num_transforms": 3,
+             "random_order": false,
+             "tfs": {
+                 "brightness": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "brightness": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "contrast": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "contrast": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "saturation": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "saturation": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 },
+                 "hue": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "hue": [
+                             -0.05,
+                             0.05
+                         ]
+                     }
+                 },
+                 "sharpness": {
+                     "weight": 1.0,
+                     "type": "SharpnessJitter",
+                     "kwargs": {
+                         "sharpness": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 }
+             }
+         },
+         "revision": null,
+         "use_imagenet_stats": true,
+         "video_backend": "torchcodec"
+     },
+     "env": null,
+     "policy": {
+         "type": "act",
+         "n_obs_steps": 1,
+         "normalization_mapping": {
+             "VISUAL": "MEAN_STD",
+             "STATE": "MEAN_STD",
+             "ACTION": "MEAN_STD"
+         },
+         "input_features": {
+             "observation.state": {
+                 "type": "STATE",
+                 "shape": [
+                     12
+                 ]
+             },
+             "observation.images.wrist_right": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             },
+             "observation.images.wrist_left": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             },
+             "observation.images.realsense_top": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             }
+         },
+         "output_features": {
+             "action": {
+                 "type": "ACTION",
+                 "shape": [
+                     12
+                 ]
+             }
+         },
+         "device": "cuda",
+         "use_amp": false,
+         "push_to_hub": true,
+         "repo_id": "Mimic-Robotics/act_nyquist_bimanual_handover",
+         "private": null,
+         "tags": null,
+         "license": null,
+         "chunk_size": 100,
+         "n_action_steps": 100,
+         "vision_backbone": "resnet18",
+         "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+         "replace_final_stride_with_dilation": false,
+         "pre_norm": false,
+         "dim_model": 512,
+         "n_heads": 8,
+         "dim_feedforward": 3200,
+         "feedforward_activation": "relu",
+         "n_encoder_layers": 4,
+         "n_decoder_layers": 1,
+         "use_vae": true,
+         "latent_dim": 32,
+         "n_vae_encoder_layers": 4,
+         "temporal_ensemble_coeff": null,
+         "dropout": 0.1,
+         "kl_weight": 10.0,
+         "optimizer_lr": 1e-05,
+         "optimizer_weight_decay": 0.0001,
+         "optimizer_lr_backbone": 1e-05
+     },
+     "output_dir": "outputs/train/act_nyquist_Bimanual_Handover_MultiDatasetTraining",
+     "job_name": "act_nyquist_Bimanual_Handover_MultiDatasetTraining",
+     "resume": false,
+     "seed": 1000,
+     "num_workers": 6,
+     "batch_size": 12,
+     "steps": 100000,
+     "eval_freq": 20000,
+     "log_freq": 200,
+     "save_checkpoint": true,
+     "save_freq": 20000,
+     "use_policy_training_preset": true,
+     "optimizer": {
+         "type": "adamw",
+         "lr": 1e-05,
+         "weight_decay": 0.0001,
+         "grad_clip_norm": 10.0,
+         "betas": [
+             0.9,
+             0.999
+         ],
+         "eps": 1e-08
+     },
+     "scheduler": null,
+     "eval": {
+         "n_episodes": 50,
+         "batch_size": 50,
+         "use_async_envs": false
+     },
+     "wandb": {
+         "enable": true,
+         "disable_artifact": false,
+         "project": "lerobot",
+         "entity": null,
+         "notes": "Multi-dataset training on 6 bimanual handover datasets - act on nyquist",
+         "run_id": "97va5wep",
+         "mode": null
+     }
+ }
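
The `dataset.repo_id` list above shows the run pooled six handover datasets into one training set. As a hedged sketch, assuming `MultiLeRobotDataset` (the class LeRobot has used when `repo_id` is a list; its import path and availability vary across releases), the pooled data can be inspected like this:

```python
# Assumes MultiLeRobotDataset is available at this path; it has moved
# between LeRobot releases and may be absent in the newest ones.
from lerobot.common.datasets.lerobot_dataset import MultiLeRobotDataset

repo_ids = [f"Batonchegg/bimanual_blue_block_handover_{i}" for i in range(1, 7)]
dataset = MultiLeRobotDataset(repo_ids)

print(len(dataset))  # total frames pooled across the six repos
```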