{
    "type": "xr1_stage2",
    "n_obs_steps": 2,
    "action_sample_factor": 1,
    "normalization_mapping": {
        "VISUAL": "IDENTITY",
        "STATE": "MEAN_STD",
        "ACTION": "MEAN_STD"
    },
    "input_features": {
        "observation.state": {
            "type": "STATE",
            "shape": [
                32
            ]
        },
        "observation.images.image_0": {
            "type": "VISUAL",
            "shape": [
                224,
                3,
                224
            ]
        },
        "observation.images.image_1": {
            "type": "VISUAL",
            "shape": [
                224,
                3,
                224
            ]
        },
        "observation.images.image_2": {
            "type": "VISUAL",
            "shape": [
                224,
                3,
                224
            ]
        },
        "observation.images.image_3": {
            "type": "VISUAL",
            "shape": [
                224,
                3,
                224
            ]
        },
        "observation.images.image_wrist_0": {
            "type": "VISUAL",
            "shape": [
                224,
                3,
                224
            ]
        },
        "observation.images.image_wrist_1": {
            "type": "VISUAL",
            "shape": [
                224,
                3,
                224
            ]
        },
        "observation.state_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        },
        "observation.images.image_0_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        },
        "observation.images.image_1_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        },
        "observation.images.image_2_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        },
        "observation.images.image_3_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        },
        "observation.images.image_wrist_0_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        },
        "observation.images.image_wrist_1_is_pad": {
            "type": "STATE",
            "shape": [
                2
            ]
        }
    },
    "output_features": {
        "action": {
            "type": "ACTION",
            "shape": [
                32
            ]
        },
        "action_is_pad": {
            "type": "ACTION",
            "shape": [
                50
            ]
        }
    },
    "stage1_pretrained_path": "/media/jushen/bamboo-fan/Save/crossvla/stage1_4dataset_10_8_bs12_kl_cb256_0528_stat/checkpoints/275000/pretrained_model/",
    "stage2_pretrained_path": "None",
    "stage2_latent_image_token_check": false,
    "dataset_stats_generate": true,
    "heterogeneous": true,
    "split_dataset": true,
    "real_robot_dev": false,
    "image_interval_steps": 50,
    "action_latent_token_num": 13,
    "mformer_hidden_size": 768,
    "decoder_hidden_size": 768,
    "codebook_embed_dim": 256,
    "codebook_k_size": 256,
    "action_chunk_size": 50,
    "chunk_size": 50,
    "n_action_steps": 50,
    "resampler": true,
    "resampler_dim": 2048,
    "resampler_depth": 3,
    "resampler_dim_head": 128,
    "resampler_heads": 4,
    "resampler_num_media_embeds": 1,
    "resampler_num_latents": 9,
    "max_state_dim": 32,
    "max_action_dim": 32,
    "resize_imgs_with_padding": [
        224,
        224
    ],
    "empty_cameras": 0,
    "adapt_to_pi_aloha": false,
    "use_delta_joint_actions_aloha": false,
    "tokenizer_max_length": 48,
    "proj_width": 1024,
    "num_steps": 10,
    "use_cache": true,
    "attention_implementation": "eager",
    "freeze_vision_encoder": false,
    "freeze_language_encoder": true,
    "train_expert_only": false,
    "train_state_proj": true,
    "optimizer_lr": 0.0001,
    "optimizer_betas": [
        0.9,
        0.95
    ],
    "optimizer_eps": 1e-08,
    "optimizer_weight_decay": 1e-10,
    "scheduler_warmup_steps": 5000,
    "scheduler_decay_steps": 300000,
    "scheduler_decay_lr": 1e-06
}
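
For reference, a minimal sketch (standard library only) of loading and sanity-checking this config. The filename config.json and the specific checks are illustrative assumptions, not part of the XR-1 codebase:

```python
# Minimal sketch: load the config and check a few internal consistencies.
# Assumptions: the file is saved as "config.json"; these checks are
# illustrative and not taken from the XR-1 training code.
import json

with open("config.json") as f:
    cfg = json.load(f)

# The three action-horizon fields should agree (all 50 here).
assert cfg["action_chunk_size"] == cfg["chunk_size"] == cfg["n_action_steps"]

# State and action vectors are padded to shared maximum widths (32 each).
assert cfg["input_features"]["observation.state"]["shape"] == [cfg["max_state_dim"]]
assert cfg["output_features"]["action"]["shape"] == [cfg["max_action_dim"]]

# Enumerate the six camera streams. Note the stored shape [224, 3, 224];
# a channels-first layout would normally be [3, 224, 224], so the loader
# presumably reorders these dimensions.
for name, feat in cfg["input_features"].items():
    if feat["type"] == "VISUAL":
        print(name, feat["shape"])
```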