seunghoney committed · verified
Commit 1803e96 · 1 Parent(s): 2729b2f

Upload policy weights, train config and readme

Files changed (4):
  1. README.md +5 -5
  2. config.json +44 -24
  3. model.safetensors +2 -2
  4. train_config.json +56 -32
README.md CHANGED
@@ -2,20 +2,20 @@
 datasets: seunghoney/so101_test2
 library_name: lerobot
 license: apache-2.0
-model_name: act
+model_name: diffusion
 pipeline_tag: robotics
 tags:
-- lerobot
 - robotics
-- act
+- lerobot
+- diffusion
 ---
 
-# Model Card for act
+# Model Card for diffusion
 
 <!-- Provide a quick summary of what the model is/does. -->
 
 
-[Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.
+[Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation.
 
 
 This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
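For quick reference, below is a minimal sketch of running a LeRobot diffusion checkpoint like this one. The repo id `seunghoney/so101_diffusion`, the observation keys, and the tensor shapes are placeholders (the commit does not name them), and the import path can vary between LeRobot releases:

```python
# Hedged sketch, not from this commit: load and step a LeRobot diffusion policy.
import torch
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

policy = DiffusionPolicy.from_pretrained("seunghoney/so101_diffusion")  # placeholder repo id
policy.reset()  # clear the internal observation/action queues between episodes

# Placeholder batch: a state vector plus one camera frame; the real feature
# names and shapes come from input_features in config.json.
batch = {
    "observation.state": torch.zeros(1, 6),
    "observation.images.front": torch.zeros(1, 3, 480, 640),
}
with torch.no_grad():
    # select_action returns a single action per call; the policy denoises a
    # fresh trajectory only once its cached action chunk is exhausted.
    action = policy.select_action(batch)
```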
config.json CHANGED
@@ -1,10 +1,10 @@
 {
-    "type": "act",
-    "n_obs_steps": 1,
+    "type": "diffusion",
+    "n_obs_steps": 2,
     "normalization_mapping": {
         "VISUAL": "MEAN_STD",
-        "STATE": "MEAN_STD",
-        "ACTION": "MEAN_STD"
+        "STATE": "MIN_MAX",
+        "ACTION": "MIN_MAX"
     },
     "input_features": {
         "observation.state": {
@@ -45,25 +45,45 @@
     "private": null,
     "tags": null,
     "license": null,
-    "chunk_size": 100,
-    "n_action_steps": 100,
+    "horizon": 16,
+    "n_action_steps": 8,
+    "drop_n_last_frames": 7,
     "vision_backbone": "resnet18",
-    "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
-    "replace_final_stride_with_dilation": false,
-    "pre_norm": false,
-    "dim_model": 512,
-    "n_heads": 8,
-    "dim_feedforward": 3200,
-    "feedforward_activation": "relu",
-    "n_encoder_layers": 4,
-    "n_decoder_layers": 1,
-    "use_vae": true,
-    "latent_dim": 32,
-    "n_vae_encoder_layers": 4,
-    "temporal_ensemble_coeff": null,
-    "dropout": 0.1,
-    "kl_weight": 10.0,
-    "optimizer_lr": 1e-05,
-    "optimizer_weight_decay": 0.0001,
-    "optimizer_lr_backbone": 1e-05
+    "crop_shape": [
+        84,
+        84
+    ],
+    "crop_is_random": true,
+    "pretrained_backbone_weights": null,
+    "use_group_norm": true,
+    "spatial_softmax_num_keypoints": 32,
+    "use_separate_rgb_encoder_per_camera": false,
+    "down_dims": [
+        512,
+        1024,
+        2048
+    ],
+    "kernel_size": 5,
+    "n_groups": 8,
+    "diffusion_step_embed_dim": 128,
+    "use_film_scale_modulation": true,
+    "noise_scheduler_type": "DDPM",
+    "num_train_timesteps": 100,
+    "beta_schedule": "squaredcos_cap_v2",
+    "beta_start": 0.0001,
+    "beta_end": 0.02,
+    "prediction_type": "epsilon",
+    "clip_sample": true,
+    "clip_sample_range": 1.0,
+    "num_inference_steps": null,
+    "do_mask_loss_for_padding": false,
+    "optimizer_lr": 0.0001,
+    "optimizer_betas": [
+        0.95,
+        0.999
+    ],
+    "optimizer_eps": 1e-08,
+    "optimizer_weight_decay": 1e-06,
+    "scheduler_name": "cosine",
+    "scheduler_warmup_steps": 500
 }
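Two details of the new config are worth spelling out. First, state and action normalization switch from MEAN_STD to MIN_MAX, which matches `"clip_sample_range": 1.0`: denoised actions are clipped to [-1, 1], so they need to be normalized into that range. Second, the noise-scheduler fields map onto a diffusers `DDPMScheduler`; the sketch below shows that mapping under the assumption that LeRobot builds the scheduler this way when `noise_scheduler_type` is `"DDPM"` (the exact wiring may differ by version):

```python
# Hedged sketch: the config's scheduler fields expressed as a diffusers DDPMScheduler.
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler

noise_scheduler = DDPMScheduler(
    num_train_timesteps=100,            # "num_train_timesteps": 100
    beta_start=0.0001,                  # "beta_start": 0.0001
    beta_end=0.02,                      # "beta_end": 0.02
    beta_schedule="squaredcos_cap_v2",  # cosine-style beta schedule
    clip_sample=True,                   # "clip_sample": true
    clip_sample_range=1.0,              # denoised samples clipped to [-1, 1]
    prediction_type="epsilon",          # the network predicts the added noise
)
# "num_inference_steps": null means inference reuses all 100 training timesteps.
```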
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fedf86b3090afa9068e97f25183927b140e1c843d99b9182a83d771d0f5e61a0
-size 206701064
+oid sha256:91f1c922b45a0f8d5a15d7f40c704ef0c3c4731c699977ddd20b1812e96ed2aa
+size 1066517736
train_config.json CHANGED
@@ -66,12 +66,12 @@
     },
     "env": null,
     "policy": {
-        "type": "act",
-        "n_obs_steps": 1,
+        "type": "diffusion",
+        "n_obs_steps": 2,
         "normalization_mapping": {
             "VISUAL": "MEAN_STD",
-            "STATE": "MEAN_STD",
-            "ACTION": "MEAN_STD"
+            "STATE": "MIN_MAX",
+            "ACTION": "MIN_MAX"
         },
         "input_features": {
             "observation.state": {
@@ -112,30 +112,50 @@
         "private": null,
         "tags": null,
         "license": null,
-        "chunk_size": 100,
-        "n_action_steps": 100,
+        "horizon": 16,
+        "n_action_steps": 8,
+        "drop_n_last_frames": 7,
         "vision_backbone": "resnet18",
-        "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
-        "replace_final_stride_with_dilation": false,
-        "pre_norm": false,
-        "dim_model": 512,
-        "n_heads": 8,
-        "dim_feedforward": 3200,
-        "feedforward_activation": "relu",
-        "n_encoder_layers": 4,
-        "n_decoder_layers": 1,
-        "use_vae": true,
-        "latent_dim": 32,
-        "n_vae_encoder_layers": 4,
-        "temporal_ensemble_coeff": null,
-        "dropout": 0.1,
-        "kl_weight": 10.0,
-        "optimizer_lr": 1e-05,
-        "optimizer_weight_decay": 0.0001,
-        "optimizer_lr_backbone": 1e-05
+        "crop_shape": [
+            84,
+            84
+        ],
+        "crop_is_random": true,
+        "pretrained_backbone_weights": null,
+        "use_group_norm": true,
+        "spatial_softmax_num_keypoints": 32,
+        "use_separate_rgb_encoder_per_camera": false,
+        "down_dims": [
+            512,
+            1024,
+            2048
+        ],
+        "kernel_size": 5,
+        "n_groups": 8,
+        "diffusion_step_embed_dim": 128,
+        "use_film_scale_modulation": true,
+        "noise_scheduler_type": "DDPM",
+        "num_train_timesteps": 100,
+        "beta_schedule": "squaredcos_cap_v2",
+        "beta_start": 0.0001,
+        "beta_end": 0.02,
+        "prediction_type": "epsilon",
+        "clip_sample": true,
+        "clip_sample_range": 1.0,
+        "num_inference_steps": null,
+        "do_mask_loss_for_padding": false,
+        "optimizer_lr": 0.0001,
+        "optimizer_betas": [
+            0.95,
+            0.999
+        ],
+        "optimizer_eps": 1e-08,
+        "optimizer_weight_decay": 1e-06,
+        "scheduler_name": "cosine",
+        "scheduler_warmup_steps": 500
     },
-    "output_dir": "outputs/train/act_so101_test",
-    "job_name": "act_so101_test",
+    "output_dir": "outputs/train/my_diffusion",
+    "job_name": "my_diffusion_training",
     "resume": false,
     "seed": 1000,
     "num_workers": 4,
@@ -147,17 +167,21 @@
     "save_freq": 20000,
     "use_policy_training_preset": true,
     "optimizer": {
-        "type": "adamw",
-        "lr": 1e-05,
-        "weight_decay": 0.0001,
+        "type": "adam",
+        "lr": 0.0001,
+        "weight_decay": 1e-06,
         "grad_clip_norm": 10.0,
         "betas": [
-            0.9,
+            0.95,
            0.999
        ],
        "eps": 1e-08
    },
-    "scheduler": null,
+    "scheduler": {
+        "type": "diffuser",
+        "num_warmup_steps": 500,
+        "name": "cosine"
+    },
     "eval": {
         "n_episodes": 50,
         "batch_size": 50,
@@ -169,7 +193,7 @@
         "project": "lerobot",
         "entity": null,
         "notes": null,
-        "run_id": "0f2ki7ez",
+        "run_id": "p3y9zllt",
         "mode": null
     }
 }
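The new policy settings imply receding-horizon execution: the model conditions on `n_obs_steps: 2` past observations, denoises a `horizon: 16` action trajectory, and executes only the first `n_action_steps: 8` before replanning (`drop_n_last_frames: 7` is consistent with the usual `horizon - n_action_steps - n_obs_steps + 1` rule for discarding episode tails too short for a full training window). A toy sketch of that loop, with stand-in observation and sampling code rather than LeRobot's actual classes:

```python
# Toy sketch of the receding-horizon loop implied by n_obs_steps=2,
# horizon=16, n_action_steps=8; only the queueing logic matters here.
from collections import deque
import numpy as np

N_OBS_STEPS, HORIZON, N_ACTION_STEPS = 2, 16, 8
STATE_DIM, ACTION_DIM = 6, 6

def sample_trajectory(obs_window):
    # Stand-in for one denoising pass: HORIZON actions from N_OBS_STEPS observations.
    return np.random.uniform(-1.0, 1.0, size=(HORIZON, ACTION_DIM))

obs_history = deque([np.zeros(STATE_DIM)] * N_OBS_STEPS, maxlen=N_OBS_STEPS)
action_queue = deque()

for step in range(24):  # toy control loop
    if not action_queue:
        trajectory = sample_trajectory(list(obs_history))
        action_queue.extend(trajectory[:N_ACTION_STEPS])  # keep first 8 of 16
    action = action_queue.popleft()            # send to the robot here
    obs_history.append(np.zeros(STATE_DIM))    # stand-in for the next observation
```

The new `scheduler` block appears to correspond to diffusers' `get_scheduler("cosine", optimizer, num_warmup_steps=500, ...)`, i.e. 500 linear warmup steps followed by cosine decay over the remainder of training.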