anna4142 committed · verified
Commit f1a7988 · Parent(s): f2cde90

hierarchical-decision-transformer-hopper

Files changed (4):
  1. README.md (+1 -1)
  2. config.json (+6 -3)
  3. model.safetensors (+2 -2)
  4. training_args.bin (+1 -1)
README.md CHANGED
@@ -38,7 +38,7 @@ The following hyperparameters were used during training:
 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 200
+- num_epochs: 120
 
 ### Training results
 
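The hyperparameters listed in the README map directly onto `transformers.TrainingArguments`. A minimal sketch of the setup after this commit, assuming the standard `Trainer` API (the `output_dir` name is hypothetical, not from the repo):

```python
from transformers import TrainingArguments

# Minimal sketch of the README's training setup after this commit.
# "hdt-hopper" is a hypothetical output directory, not from the commit.
training_args = TrainingArguments(
    output_dir="hdt-hopper",
    optim="adamw_torch",        # AdamW, PyTorch implementation
    adam_beta1=0.9,             # betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,          # epsilon=1e-08
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=120,       # reduced from 200 in this commit
)
```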
config.json CHANGED
@@ -1,12 +1,13 @@
 {
-  "act_dim": 6,
+  "act_dim": 3,
   "action_tanh": true,
   "activation_function": "relu",
   "architectures": [
-    "TrainableDT"
+    "HierarchicalTrainableDT"
   ],
   "attn_pdrop": 0.1,
   "bos_token_id": 50256,
+  "cluster_hidden_dim": 128,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
   "hidden_size": 128,
@@ -14,6 +15,7 @@
   "layer_norm_epsilon": 1e-05,
   "max_ep_len": 4096,
   "model_type": "decision_transformer",
+  "n_clusters": 8,
   "n_head": 1,
   "n_inner": null,
   "n_layer": 3,
@@ -22,9 +24,10 @@
   "resid_pdrop": 0.1,
   "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
-  "state_dim": 17,
+  "state_dim": 11,
   "torch_dtype": "float32",
   "transformers_version": "4.46.2",
   "use_cache": true,
+  "use_subgoal_weighting": true,
   "vocab_size": 1
 }
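The new `state_dim` (11) and `act_dim` (3) match the Hopper environment's observation and action spaces, in line with the commit title. A sketch of inspecting the updated config, assuming the repo id follows the commit title; loading the weights themselves would additionally require the custom `HierarchicalTrainableDT` class, which is not part of `transformers`:

```python
from transformers import DecisionTransformerConfig

# Sketch: extra JSON keys such as n_clusters, cluster_hidden_dim, and
# use_subgoal_weighting are not part of the base Decision Transformer
# schema, but PretrainedConfig stores unknown keys as plain attributes.
config = DecisionTransformerConfig.from_pretrained(
    "anna4142/hierarchical-decision-transformer-hopper"  # assumed repo id
)
print(config.state_dim, config.act_dim)              # 11 3 (Hopper)
print(config.n_clusters, config.cluster_hidden_dim)  # 8 128
```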
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f9fefae1efcb89df5ef5bdc1b71b5ace0041243c894f08e3dbb7fa8995997a2
-size 5034448
+oid sha256:b1594b23cce583700d3ad77126a99316cd24a114cb910cfa0c02403861153946
+size 5036908
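Git tracks only a Git LFS pointer here (version, oid, size); the roughly 5 MB weight blob itself lives in LFS storage. After `git clone` and `git lfs pull`, the tensors can be inspected with the `safetensors` library, as a sketch:

```python
from safetensors.torch import load_file

# Sketch: assumes a local clone where `git lfs pull` has replaced the
# pointer file with the real 5,036,908-byte weight blob.
state_dict = load_file("model.safetensors")
for name, tensor in state_dict.items():
    print(name, tuple(tensor.shape))
```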
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88c3d511ad5dfde27e4489c7390c4d7926683ad0a7458b50c4ebfc8f44f1f29c
+oid sha256:7b450a94cc940aa7ec181d60e07338eced46b079a4c4d822ad6444a2bd91a472
 size 5240
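`training_args.bin` is a pickled `TrainingArguments` object; only its LFS oid changes here, while the serialized size stays at 5240 bytes. A sketch of inspecting it, again assuming LFS objects have been fetched:

```python
import torch

# Sketch: full unpickling is required (weights_only=False), which means
# the file should only be loaded from a trusted repo.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)  # expected to reflect the change to 120
```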