huiwon committed on
Commit
5e66157
·
verified ·
1 Parent(s): 898de24

Add config.json

Browse files
Files changed (1) hide show
  1. config.json +103 -0
config.json ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "action_dim": 32,
  "action_head_cfg": {
    "action_dim": 32,
    "action_horizon": 16,
    "add_pos_embed": true,
    "attn_type": "joint_attn_v2",
    "backbone_embedding_dim": 4096,
    "base_freq": 50.0,
    "dct_loss_weight": 0.0,
    "diffusion_model_cfg": {
      "attention_head_dim": 64,
      "cross_attention_dim": 4096,
      "depth": 4,
      "depth_single_blocks": 8,
      "direct_visual_conditioning": null,
      "disable_time_token_pos_emb": false,
      "dropout": 0.2,
      "final_dropout": true,
      "interleave_self_attention": true,
      "meta_queries_as_modality": false,
      "norm_type": "ada_norm",
      "num_attention_heads": 24,
      "num_layers": 16,
      "num_temb_tokens": 1,
      "output_dim": 1024,
      "positional_embeddings": "rope_sa_only",
      "rope_theta": 10000.0,
      "state_as_modality": false,
      "temb_type": "additional_token",
      "use_alternate_vl_conditioning": false,
      "use_swiglu": true
    },
    "discretize_timesteps": false,
    "ff_loss_weight": 0.0,
    "flow_matching_loss_weight": 1.0,
    "hidden_size": 1024,
    "input_embedding_dim": 1536,
    "max_action_dim": 32,
    "max_num_embodiments": 32,
    "max_state_dim": 64,
    "model_dct": false,
    "model_dtype": "float32",
    "noise_beta_alpha": 1.5,
    "noise_beta_beta": 1.0,
    "noise_s": 0.999,
    "num_inference_timesteps": 4,
    "num_target_vision_tokens": 32,
    "num_timestep_buckets": 1000,
    "post_norm": "none",
    "pre_norm": "layer_norm",
    "qk_rmsnorm": false,
    "remove_bias": false,
    "tune_diffusion_model": true,
    "tune_projector": true,
    "use_future_tokens": false,
    "use_qknorm": true,
    "use_rmsnorm": true,
    "use_vlln": false,
    "vl_self_attention_cfg": {
      "attention_head_dim": 64,
      "dropout": 0.2,
      "final_dropout": true,
      "num_attention_heads": 64,
      "num_layers": 4,
      "positional_embeddings": null
    },
    "x_prediction": false
  },
  "action_horizon": 16,
  "architectures": [
    "GR00T_N1_5"
  ],
  "attn_implementation": null,
  "backbone_cfg": {
    "load_bf16": false,
    "meta_queries_mode": "full",
    "n_meta_queries": 4,
    "project_to_dim": null,
    "qwen_path": "/fsx/alinvla/AlinVLA-VLM/checkpoints/robot_vqa_v2/checkpoint-7647",
    "reproject_vision": false,
    "select_layer": 18,
    "tune_llm": false,
    "tune_visual": false,
    "use_causal_mask": true,
    "use_flash_attention": true,
    "use_meta_queries": false
  },
  "backbone_model_type": "qwen3_vl_8b",
  "compute_dtype": "bfloat16",
  "dtype": "bfloat16",
  "hidden_size": 2048,
  "lap_cfg": {},
  "max_action_dim": 32,
  "model_dtype": "float32",
  "model_type": "gr00t_n1_5",
  "transformers_version": "5.0.0.dev0",
  "tune_diffusion_model": true,
  "tune_llm": false,
  "tune_projector": true,
  "tune_visual": false,
  "use_cache": false
}