philperceptron committed
Commit 0067217 · 1 Parent(s): 6cdd5dd

remove sequential

added_tokens.json DELETED
@@ -1,28 +0,0 @@
-{
-  "</think>": 151668,
-  "</tool_call>": 151658,
-  "</tool_response>": 151666,
-  "<think>": 151667,
-  "<tool_call>": 151657,
-  "<tool_response>": 151665,
-  "<|box_end|>": 151649,
-  "<|box_start|>": 151648,
-  "<|endoftext|>": 151643,
-  "<|file_sep|>": 151664,
-  "<|fim_middle|>": 151660,
-  "<|fim_pad|>": 151662,
-  "<|fim_prefix|>": 151659,
-  "<|fim_suffix|>": 151661,
-  "<|im_end|>": 151645,
-  "<|im_start|>": 151644,
-  "<|image_pad|>": 151655,
-  "<|object_ref_end|>": 151647,
-  "<|object_ref_start|>": 151646,
-  "<|quad_end|>": 151651,
-  "<|quad_start|>": 151650,
-  "<|repo_name|>": 151663,
-  "<|video_pad|>": 151656,
-  "<|vision_end|>": 151653,
-  "<|vision_pad|>": 151654,
-  "<|vision_start|>": 151652
-}
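These special-token-to-ID mappings (chat, tool-call, vision, and FIM markers) are normally also recorded in the repo's tokenizer.json, which is presumably why added_tokens.json can be dropped here. A minimal sketch to verify the IDs still resolve after the deletion, assuming the checkpoint is available locally (the path is a placeholder):

```python
# Sketch: check that the special-token IDs listed above still resolve
# once added_tokens.json is gone (assumes tokenizer.json carries them).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")  # placeholder local path
for token, expected_id in [("<think>", 151667), ("<|im_start|>", 151644)]:
    assert tok.convert_tokens_to_ids(token) == expected_id, token
```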
config.json CHANGED
@@ -1,12 +1,4 @@
 {
-  "_rope_parameters": {
-    "rope_theta": 1000000,
-    "rope_type": "default"
-  },
-  "_rope_scaling": {
-    "rope_theta": 1000000,
-    "rope_type": "default"
-  },
   "architectures": [
     "IsaacForConditionalGeneration"
   ],
@@ -63,9 +55,14 @@
   "num_key_value_heads": 8,
   "pixel_shuffle_scale": 2,
   "rms_norm_eps": 1e-06,
+  "rope_parameters": {
+    "rope_theta": 1000000,
+    "rope_type": "default"
+  },
   "rope_theta": 1000000,
   "sliding_window": null,
   "text_config": {
+    "_name_or_path": "/tmp/qwen3_temp__thn86uc/hf-checkpoint",
     "architectures": [
       "IsaacForConditionalGeneration"
     ],
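The net effect is that the RoPE settings move from the underscore-prefixed `_rope_parameters`/`_rope_scaling` keys into a single `rope_parameters` block, with the flat `rope_theta` kept alongside it, and `text_config` gains a `_name_or_path`. A minimal sketch reading the updated file with plain `json` (not assuming any particular `transformers` config class handles this custom architecture):

```python
# Sketch: read the reworked RoPE settings from the updated config.json.
import json

with open("config.json") as f:
    cfg = json.load(f)

# The nested block and the flat key carry the same theta after this commit.
assert cfg["rope_parameters"]["rope_theta"] == cfg["rope_theta"] == 1000000
print(cfg["rope_parameters"]["rope_type"])  # "default"
```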
merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00003.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:53b74a3693ac1b601e6312f3ac0eeb569371d112d732aae687802f3d5e2f5088
-size 4969541832
model-00002-of-00003.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f7730a05aca361a14fb147ba701443df46a2a48ecab7aeb30359cb284a9eb9d9
-size 4054193816
model-00003-of-00003.safetensors → model.safetensors RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6941d35ff1feae1603946f8746a71205bb86343b57968402df2e737faf9258a2
-size 1244659840
+oid sha256:7de0ee41ebb1e9d9d4cb295b9ea2f7e2b2e2ab05c60cc3de2ffd77a59c7a8d61
+size 10268395696
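Together with the two shard deletions above and the index deletion below, this rename consolidates the checkpoint into a single ~10 GB model.safetensors. A minimal sketch of such a merge with `safetensors.torch`, assuming all three shards are present locally (the merge script itself is illustrative, not part of this commit):

```python
# Sketch: merge the three shards into one file; once everything lives in
# model.safetensors, model.safetensors.index.json has nothing left to map.
from safetensors.torch import load_file, save_file

state_dict = {}
for i in (1, 2, 3):
    state_dict.update(load_file(f"model-0000{i}-of-00003.safetensors"))

save_file(state_dict, "model.safetensors", metadata={"format": "pt"})
```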
model.safetensors.index.json DELETED
@@ -1,758 +0,0 @@
-{
-  "metadata": {
-    "total_parameters": 2567073008,
-    "total_size": 10268292032
-  },
-  "weight_map": {
-    "lm_head.weight": "model-00003-of-00003.safetensors",
-    "model.text_model.embed_tokens.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.15.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.16.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.17.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.18.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.18.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.18.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.27.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.text_model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.text_model.norm.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.multimodal_projector.0.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.multimodal_projector.2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.embeddings.patch_embedding.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.embeddings.patch_embedding.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.embeddings.position_embedding.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.layer_norm1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.layer_norm1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.layer_norm2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.layer_norm2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc1.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc1.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc2.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
556
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
557
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
558
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
559
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
560
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
561
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
562
- "model.vision_embedding.vision_tower.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
563
- "model.vision_embedding.vision_tower.encoder.layers.22.layer_norm1.bias": "model-00002-of-00003.safetensors",
564
- "model.vision_embedding.vision_tower.encoder.layers.22.layer_norm1.weight": "model-00002-of-00003.safetensors",
565
- "model.vision_embedding.vision_tower.encoder.layers.22.layer_norm2.bias": "model-00002-of-00003.safetensors",
566
- "model.vision_embedding.vision_tower.encoder.layers.22.layer_norm2.weight": "model-00002-of-00003.safetensors",
567
- "model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc1.bias": "model-00002-of-00003.safetensors",
568
- "model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc1.weight": "model-00002-of-00003.safetensors",
569
- "model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc2.bias": "model-00002-of-00003.safetensors",
570
- "model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc2.weight": "model-00002-of-00003.safetensors",
571
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
572
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
573
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
574
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
575
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
576
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
577
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
578
- "model.vision_embedding.vision_tower.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
579
- "model.vision_embedding.vision_tower.encoder.layers.23.layer_norm1.bias": "model-00002-of-00003.safetensors",
580
- "model.vision_embedding.vision_tower.encoder.layers.23.layer_norm1.weight": "model-00002-of-00003.safetensors",
581
- "model.vision_embedding.vision_tower.encoder.layers.23.layer_norm2.bias": "model-00002-of-00003.safetensors",
582
- "model.vision_embedding.vision_tower.encoder.layers.23.layer_norm2.weight": "model-00002-of-00003.safetensors",
583
- "model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc1.bias": "model-00002-of-00003.safetensors",
584
- "model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc1.weight": "model-00002-of-00003.safetensors",
585
- "model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc2.bias": "model-00002-of-00003.safetensors",
586
- "model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc2.weight": "model-00002-of-00003.safetensors",
587
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
588
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
589
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
590
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
591
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
592
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
593
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
594
- "model.vision_embedding.vision_tower.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
595
- "model.vision_embedding.vision_tower.encoder.layers.24.layer_norm1.bias": "model-00002-of-00003.safetensors",
596
- "model.vision_embedding.vision_tower.encoder.layers.24.layer_norm1.weight": "model-00002-of-00003.safetensors",
597
- "model.vision_embedding.vision_tower.encoder.layers.24.layer_norm2.bias": "model-00002-of-00003.safetensors",
598
- "model.vision_embedding.vision_tower.encoder.layers.24.layer_norm2.weight": "model-00002-of-00003.safetensors",
599
- "model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc1.bias": "model-00002-of-00003.safetensors",
600
- "model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc1.weight": "model-00002-of-00003.safetensors",
601
- "model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc2.bias": "model-00002-of-00003.safetensors",
602
- "model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc2.weight": "model-00002-of-00003.safetensors",
603
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
604
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
605
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
606
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
607
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
608
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
609
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
610
- "model.vision_embedding.vision_tower.encoder.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
611
- "model.vision_embedding.vision_tower.encoder.layers.25.layer_norm1.bias": "model-00002-of-00003.safetensors",
612
- "model.vision_embedding.vision_tower.encoder.layers.25.layer_norm1.weight": "model-00002-of-00003.safetensors",
613
- "model.vision_embedding.vision_tower.encoder.layers.25.layer_norm2.bias": "model-00002-of-00003.safetensors",
614
- "model.vision_embedding.vision_tower.encoder.layers.25.layer_norm2.weight": "model-00002-of-00003.safetensors",
615
- "model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc1.bias": "model-00002-of-00003.safetensors",
616
- "model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc1.weight": "model-00002-of-00003.safetensors",
617
- "model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc2.bias": "model-00002-of-00003.safetensors",
618
- "model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc2.weight": "model-00002-of-00003.safetensors",
619
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
620
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
621
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
622
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
623
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
624
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
625
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
626
- "model.vision_embedding.vision_tower.encoder.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
627
- "model.vision_embedding.vision_tower.encoder.layers.26.layer_norm1.bias": "model-00002-of-00003.safetensors",
628
- "model.vision_embedding.vision_tower.encoder.layers.26.layer_norm1.weight": "model-00002-of-00003.safetensors",
629
- "model.vision_embedding.vision_tower.encoder.layers.26.layer_norm2.bias": "model-00002-of-00003.safetensors",
630
- "model.vision_embedding.vision_tower.encoder.layers.26.layer_norm2.weight": "model-00002-of-00003.safetensors",
631
- "model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc1.bias": "model-00002-of-00003.safetensors",
632
- "model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc1.weight": "model-00002-of-00003.safetensors",
633
- "model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc2.bias": "model-00002-of-00003.safetensors",
634
- "model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc2.weight": "model-00002-of-00003.safetensors",
635
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
636
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
637
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
638
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
639
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
640
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
641
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
642
- "model.vision_embedding.vision_tower.encoder.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
643
- "model.vision_embedding.vision_tower.encoder.layers.3.layer_norm1.bias": "model-00002-of-00003.safetensors",
644
- "model.vision_embedding.vision_tower.encoder.layers.3.layer_norm1.weight": "model-00002-of-00003.safetensors",
645
- "model.vision_embedding.vision_tower.encoder.layers.3.layer_norm2.bias": "model-00002-of-00003.safetensors",
646
- "model.vision_embedding.vision_tower.encoder.layers.3.layer_norm2.weight": "model-00002-of-00003.safetensors",
647
- "model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc1.bias": "model-00002-of-00003.safetensors",
648
- "model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc1.weight": "model-00002-of-00003.safetensors",
649
- "model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc2.bias": "model-00002-of-00003.safetensors",
650
- "model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc2.weight": "model-00002-of-00003.safetensors",
651
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
652
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
653
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
654
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
655
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
656
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
657
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
658
- "model.vision_embedding.vision_tower.encoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
659
- "model.vision_embedding.vision_tower.encoder.layers.4.layer_norm1.bias": "model-00002-of-00003.safetensors",
660
- "model.vision_embedding.vision_tower.encoder.layers.4.layer_norm1.weight": "model-00002-of-00003.safetensors",
661
- "model.vision_embedding.vision_tower.encoder.layers.4.layer_norm2.bias": "model-00002-of-00003.safetensors",
662
- "model.vision_embedding.vision_tower.encoder.layers.4.layer_norm2.weight": "model-00002-of-00003.safetensors",
663
- "model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc1.bias": "model-00002-of-00003.safetensors",
664
- "model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc1.weight": "model-00002-of-00003.safetensors",
665
- "model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc2.bias": "model-00002-of-00003.safetensors",
666
- "model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc2.weight": "model-00002-of-00003.safetensors",
667
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
668
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
669
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
670
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
671
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
672
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
673
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
674
- "model.vision_embedding.vision_tower.encoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
675
- "model.vision_embedding.vision_tower.encoder.layers.5.layer_norm1.bias": "model-00002-of-00003.safetensors",
676
- "model.vision_embedding.vision_tower.encoder.layers.5.layer_norm1.weight": "model-00002-of-00003.safetensors",
677
- "model.vision_embedding.vision_tower.encoder.layers.5.layer_norm2.bias": "model-00002-of-00003.safetensors",
678
- "model.vision_embedding.vision_tower.encoder.layers.5.layer_norm2.weight": "model-00002-of-00003.safetensors",
679
- "model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc1.bias": "model-00002-of-00003.safetensors",
680
- "model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc1.weight": "model-00002-of-00003.safetensors",
681
- "model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc2.bias": "model-00002-of-00003.safetensors",
682
- "model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc2.weight": "model-00002-of-00003.safetensors",
683
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
684
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
685
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
686
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
687
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
688
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
689
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
690
- "model.vision_embedding.vision_tower.encoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
691
- "model.vision_embedding.vision_tower.encoder.layers.6.layer_norm1.bias": "model-00002-of-00003.safetensors",
692
- "model.vision_embedding.vision_tower.encoder.layers.6.layer_norm1.weight": "model-00002-of-00003.safetensors",
693
- "model.vision_embedding.vision_tower.encoder.layers.6.layer_norm2.bias": "model-00002-of-00003.safetensors",
694
- "model.vision_embedding.vision_tower.encoder.layers.6.layer_norm2.weight": "model-00002-of-00003.safetensors",
695
- "model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc1.bias": "model-00002-of-00003.safetensors",
696
- "model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc1.weight": "model-00002-of-00003.safetensors",
697
- "model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc2.bias": "model-00002-of-00003.safetensors",
698
- "model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc2.weight": "model-00002-of-00003.safetensors",
699
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
700
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
701
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
702
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
703
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
704
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
705
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
706
- "model.vision_embedding.vision_tower.encoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
707
- "model.vision_embedding.vision_tower.encoder.layers.7.layer_norm1.bias": "model-00002-of-00003.safetensors",
708
- "model.vision_embedding.vision_tower.encoder.layers.7.layer_norm1.weight": "model-00002-of-00003.safetensors",
709
- "model.vision_embedding.vision_tower.encoder.layers.7.layer_norm2.bias": "model-00002-of-00003.safetensors",
710
- "model.vision_embedding.vision_tower.encoder.layers.7.layer_norm2.weight": "model-00002-of-00003.safetensors",
711
- "model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc1.bias": "model-00002-of-00003.safetensors",
712
- "model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc1.weight": "model-00002-of-00003.safetensors",
713
- "model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc2.bias": "model-00002-of-00003.safetensors",
714
- "model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc2.weight": "model-00002-of-00003.safetensors",
715
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
716
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
717
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
718
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
719
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
720
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
721
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
722
- "model.vision_embedding.vision_tower.encoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
723
- "model.vision_embedding.vision_tower.encoder.layers.8.layer_norm1.bias": "model-00002-of-00003.safetensors",
724
- "model.vision_embedding.vision_tower.encoder.layers.8.layer_norm1.weight": "model-00002-of-00003.safetensors",
725
- "model.vision_embedding.vision_tower.encoder.layers.8.layer_norm2.bias": "model-00002-of-00003.safetensors",
726
- "model.vision_embedding.vision_tower.encoder.layers.8.layer_norm2.weight": "model-00002-of-00003.safetensors",
727
- "model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc1.bias": "model-00002-of-00003.safetensors",
728
- "model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc1.weight": "model-00002-of-00003.safetensors",
729
- "model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc2.bias": "model-00002-of-00003.safetensors",
730
- "model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc2.weight": "model-00002-of-00003.safetensors",
731
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
732
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
733
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
734
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
735
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
736
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
737
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
738
- "model.vision_embedding.vision_tower.encoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
739
- "model.vision_embedding.vision_tower.encoder.layers.9.layer_norm1.bias": "model-00002-of-00003.safetensors",
740
- "model.vision_embedding.vision_tower.encoder.layers.9.layer_norm1.weight": "model-00002-of-00003.safetensors",
741
- "model.vision_embedding.vision_tower.encoder.layers.9.layer_norm2.bias": "model-00002-of-00003.safetensors",
742
- "model.vision_embedding.vision_tower.encoder.layers.9.layer_norm2.weight": "model-00002-of-00003.safetensors",
743
- "model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc1.bias": "model-00002-of-00003.safetensors",
744
- "model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc1.weight": "model-00002-of-00003.safetensors",
745
- "model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc2.bias": "model-00002-of-00003.safetensors",
746
- "model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc2.weight": "model-00002-of-00003.safetensors",
747
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
748
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
749
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
750
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
751
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
752
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
753
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
754
- "model.vision_embedding.vision_tower.encoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
755
- "model.vision_embedding.vision_tower.post_layernorm.bias": "model-00002-of-00003.safetensors",
756
- "model.vision_embedding.vision_tower.post_layernorm.weight": "model-00002-of-00003.safetensors"
757
- }
758
- }
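The entries above close out the deleted `model.safetensors.index.json`: its `weight_map` ties each parameter name to the shard file that stores it, and the index is only needed while the checkpoint is split across shards. A sketch of how such an index is typically consumed (an illustrative loader, not this repo's code):

import json
from safetensors.torch import load_file

# Read the index, then pull each tensor from the shard the weight_map names.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

shards: dict[str, dict] = {}
state_dict = {}
for name, shard_file in index["weight_map"].items():
    if shard_file not in shards:
        shards[shard_file] = load_file(shard_file)  # one load per shard
    state_dict[name] = shards[shard_file][name]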
 
modular_isaac.py CHANGED
@@ -117,7 +117,7 @@ from transformers.image_utils import (
117
  PILImageResampling,
118
  )
119
  from transformers.modeling_attn_mask_utils import AttentionMaskConverter
120
- from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
121
  from transformers.modeling_rope_utils import rope_config_validation
122
  from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
123
  from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
@@ -130,10 +130,17 @@ from transformers.models.siglip2.modeling_siglip2 import (
130
  Siglip2EncoderLayer,
131
  Siglip2VisionEmbeddings,
132
  )
133
- from transformers.masking_utils import create_masks_for_generate, eager_mask, packed_sequence_mask_function, sdpa_mask
134
  from transformers.processing_utils import ImagesKwargs, ProcessorMixin, Unpack
135
  from transformers.utils import auto_docstring, TensorType
136
- from transformers.utils.generic import can_return_tuple, check_model_inputs
 
137
 
138
  # Vision preprocessing constants
139
  from transformers.utils.constants import IMAGENET_STANDARD_MEAN as VISION_MEAN
@@ -141,22 +148,24 @@ from transformers.utils.constants import IMAGENET_STANDARD_STD as VISION_STD
141
  from transformers.utils.import_utils import is_torchdynamo_compiling
142
 
143
  try:
144
- from perceptron.tensorstream import (
145
- Event,
146
- Stream,
147
- TensorStream,
148
- TextType,
149
- VisionType,
150
- create_stream,
151
- group_streams,
152
- )
153
- from perceptron.tensorstream.ops import (
154
- compute_mrope_pos_tensor,
155
- modality_mask,
156
- reconstruct_tensor_stream_from_compact_dict,
157
- slice as ts_slice,
158
- tensor_stream_token_view,
159
- )
 
 
160
  except ModuleNotFoundError as exc: # pragma: no cover - import guard
161
  raise ModuleNotFoundError(
162
  "genesis.public.tensorstream is required for the Isaac HuggingFace integration. "
@@ -220,7 +229,7 @@ class IsaacVisionConfig(Siglip2VisionConfig):
220
  self._attn_implementation = "sdpa"
221
 
222
 
223
- class IsaacImageProcessorKwargs(ImagesKwargs, total=False):
224
  patch_size: Optional[int]
225
  max_num_patches: Optional[int]
226
  min_num_patches: Optional[int]
@@ -234,36 +243,27 @@ class IsaacImageProcessorFast(BaseImageProcessorFast):
234
 
235
  resample = PILImageResampling.BILINEAR
236
  model_input_names = ["patches", "token_grids"]
237
- valid_kwargs = IsaacImageProcessorKwargs
238
  unused_kwargs = ["size", "do_center_crop", "crop_size"]
239
 
240
  do_resize = True
241
- size: Optional[SizeDict] = None
242
- default_to_square: Optional[bool] = None
243
  do_center_crop = False
244
- crop_size: Optional[SizeDict] = None
245
  patch_size: Optional[int] = 16
246
  max_num_patches: Optional[int] = 256
247
  min_num_patches: Optional[int] = None
248
  pixel_shuffle_scale: Optional[int] = 1
249
  do_pad = False
250
- pad_size: Optional[SizeDict] = None
251
  do_rescale = True
252
- rescale_factor = 1 / 255
253
  do_normalize = True
254
  image_mean = list(VISION_MEAN)
255
  image_std = list(VISION_STD)
256
  do_convert_rgb = True
257
- return_tensors = None
258
- data_format = ChannelDimension.FIRST
259
- input_data_format = None
260
- device = None
261
  disable_grouping = False
262
  size_divisor: Optional[int] = None
263
 
264
  def __init__(
265
  self,
266
- **kwargs: Unpack[IsaacImageProcessorKwargs],
267
  ) -> None:
268
  super().__init__(**kwargs)
269
 
@@ -399,7 +399,7 @@ class IsaacImageProcessorFast(BaseImageProcessorFast):
399
  nhwc_images = image_batch.permute(0, 2, 3, 1)
400
  nhwc_images = _compute_residual_p_frames(nhwc_images, is_p_frame=[False] * batch_size)
401
 
402
- patches = patchify_vision(nhwc_images, patch_size=patch_size)
403
  _, height_tokens, width_tokens, _ = patches.shape
404
 
405
  token_grid = (
@@ -488,32 +488,39 @@ def document_mask_function_from_cu_seqlens(cu_seqlens: Optional[torch.Tensor]) -
488
  return packed_sequence_mask_function(packed_sequence_mask)
489
 
490
 
491
- def ensure_document_attention_mask(
492
- attention_mask: Optional[torch.Tensor],
 
493
  cu_seqlens: Optional[torch.Tensor],
494
- total_tokens: int,
495
- dtype: torch.dtype,
496
- device: torch.device,
497
- *,
498
- return_mask_function: bool = False,
499
- ) -> Optional[Union[torch.Tensor, Callable]]:
500
- """Return the provided mask, a callable mask from ``cu_seqlens``, or ``None``.
501
-
502
- ``return_mask_function=True`` yields a callable suitable for ``masking_utils``; otherwise
503
- ``None`` is returned when no explicit ``attention_mask`` is provided. The legacy additive mask
504
- has been removed in favor of the callable-based path.
505
- """
506
 
507
- if attention_mask is not None:
508
- return attention_mask
 
 
509
 
510
- if cu_seqlens is None:
 
511
  return None
512
 
513
- if return_mask_function:
514
- return document_mask_function_from_cu_seqlens(cu_seqlens)
515
-
516
- return None
517
 
518
 
519
  class IsaacVisionEmbeddings(nn.Module):
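The hunk above routes document masking through `document_mask_function_from_cu_seqlens` and drops the `ensure_document_attention_mask` shim. For intuition, the mask semantics that `cu_seqlens` encodes can be sketched as a block-diagonal boolean mask (hypothetical helper, not the repo's callable-based implementation):

import torch

def block_diagonal_mask(cu_seqlens: torch.Tensor) -> torch.Tensor:
    # Tokens may attend only within their own packed document.
    total = int(cu_seqlens[-1])
    token_idx = torch.arange(total, device=cu_seqlens.device)
    doc_ids = torch.bucketize(token_idx, cu_seqlens[1:], right=True)
    return doc_ids[:, None] == doc_ids[None, :]

cu = torch.tensor([0, 3, 7])    # two packed documents of lengths 3 and 4
mask = block_diagonal_mask(cu)  # (7, 7) boolean, block-diagonal
assert mask[:3, :3].all() and not mask[0, 3]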
@@ -671,18 +678,11 @@ class IsaacVisionAttention(Siglip2Attention):
671
  self,
672
  hidden_states: torch.Tensor,
673
  attention_mask: Optional[torch.Tensor] = None,
674
- position_ids: Optional[torch.Tensor] = None,
675
- past_key_value: Optional[torch.Tensor] = None,
676
  output_attentions: bool = False,
677
- is_causal: bool = False,
678
  cu_seqlens: Optional[torch.Tensor] = None,
679
  max_seqlen: Optional[int] = None,
680
  **kwargs,
681
  ):
682
- # Ignore unused arguments for interface compatibility
683
- _ = position_ids
684
- _ = past_key_value
685
- _ = is_causal
686
  kwargs.pop("output_hidden_states", None)
687
  kwargs.pop("return_dict", None)
688
 
@@ -695,22 +695,10 @@ class IsaacVisionAttention(Siglip2Attention):
695
  keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
696
  values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
697
 
698
- if not queries.is_contiguous():
699
- queries = queries.contiguous()
700
- if not keys.is_contiguous():
701
- keys = keys.contiguous()
702
- if not values.is_contiguous():
703
- values = values.contiguous()
704
-
705
- L = queries.size(0)
706
- if max_seqlen is not None:
707
- max_q = max_k = int(max_seqlen)
708
- else:
709
- max_q = max_k = self._max_from_cu(cu_seqlens, L)
710
-
711
  attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"]
712
- if self.config._attn_implementation != "sdpa":
713
- attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
714
 
715
  dropout = 0.0 if not self.training else self.dropout
716
  attention_kwargs: dict[str, Any] = {
@@ -718,15 +706,36 @@ class IsaacVisionAttention(Siglip2Attention):
718
  "scaling": self.scale,
719
  "dropout": dropout,
720
  }
721
- if cu_seqlens is not None:
722
- attention_kwargs["cu_seq_lens_q"] = cu_seqlens
723
- attention_kwargs["cu_seq_lens_k"] = cu_seqlens
724
- if max_seqlen is not None:
725
- attention_kwargs["max_length_q"] = max_q
726
- attention_kwargs["max_length_k"] = max_k
727
- if output_attentions:
728
  attention_kwargs["output_attentions"] = True
729
 
730
  attn_output, attn_weights = attention_interface(
731
  self,
732
  queries,
@@ -749,12 +758,6 @@ class IsaacVisionAttention(Siglip2Attention):
749
 
750
  return attn_output, attn_weights
751
 
752
- @staticmethod
753
- def _max_from_cu(cu: Optional[torch.Tensor], fallback: int) -> int:
754
- if cu is None or cu.numel() < 2:
755
- return fallback
756
- return int((cu[1:] - cu[:-1]).max().item())
757
-
758
 
759
  class IsaacVisionEncoderLayer(Siglip2EncoderLayer):
760
  """Isaac vision encoder layer with variable-length attention."""
@@ -780,30 +783,16 @@ class IsaacVisionEncoderLayer(Siglip2EncoderLayer):
780
  Maximum document length referenced by `cu_seqlens`. Passed to FlashAttention so it can size temporary
781
  buffers for packed variable-length attention.
782
  """
783
- attention_mask = ensure_document_attention_mask(
784
- attention_mask,
785
- cu_seqlens,
786
- hidden_states.size(1),
787
- hidden_states.dtype,
788
- hidden_states.device,
789
- return_mask_function=False,
790
- )
791
-
792
  # Run attention directly so variable-length metadata reaches FlashAttention.
793
  residual = hidden_states
794
  hidden_states = self.layer_norm1(hidden_states)
795
- attn_outputs = self.self_attn(
796
  hidden_states,
797
  attention_mask=attention_mask,
798
  cu_seqlens=cu_seqlens,
799
  max_seqlen=max_seqlen,
800
- output_attentions=output_attentions,
801
  **kwargs,
802
  )
803
- if isinstance(attn_outputs, tuple):
804
- attn_output, attn_weights = attn_outputs
805
- else:
806
- attn_output, attn_weights = attn_outputs, None
807
  hidden_states = residual + attn_output
808
 
809
  residual = hidden_states
@@ -811,8 +800,6 @@ class IsaacVisionEncoderLayer(Siglip2EncoderLayer):
811
  hidden_states = self.mlp(hidden_states)
812
  hidden_states = residual + hidden_states
813
 
814
- if output_attentions:
815
- return hidden_states, attn_weights
816
  return hidden_states
817
 
818
 
@@ -824,36 +811,21 @@ class IsaacVisionEncoder(Siglip2Encoder):
824
  self.layers = nn.ModuleList([IsaacVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
825
 
826
  @can_return_tuple
 
827
  def forward(
828
  self,
829
  inputs_embeds,
830
  attention_mask: Optional[torch.Tensor] = None,
831
- cu_seqlens: Optional[torch.Tensor] = None,
832
- max_seqlen: Optional[int] = None,
833
- output_attentions: Optional[bool] = None,
834
- output_hidden_states: Optional[bool] = None,
835
- return_dict: Optional[bool] = None,
836
  **kwargs: Unpack[TransformersKwargs],
837
  ):
838
- attention_mask = ensure_document_attention_mask(
839
- attention_mask,
840
- cu_seqlens,
841
- inputs_embeds.size(1),
842
- inputs_embeds.dtype,
843
- inputs_embeds.device,
844
- return_mask_function=False,
845
- )
846
-
847
- return super().forward(
848
- inputs_embeds,
849
- attention_mask=attention_mask,
850
- output_attentions=output_attentions,
851
- output_hidden_states=output_hidden_states,
852
- return_dict=return_dict,
853
- cu_seqlens=cu_seqlens,
854
- max_seqlen=max_seqlen,
855
- **kwargs,
856
- )
857
 
858
 
859
  def create_pixel_shuffle_index_map(
@@ -949,15 +921,15 @@ def pixel_shuffle_varlen(
949
  Raises:
950
  ValueError: If more than one batch item is provided.
951
  """
952
- keep_batch_dim = x.dim() == 3
953
- if keep_batch_dim:
954
  if x.size(0) != 1:
955
  raise AssertionError("Packed sequence is expected to have batch_size == 1")
956
- x_ = x.squeeze(0) # (seq, embed)
957
  else:
958
- x_ = x # (seq, embed)
959
 
960
- embed_dim = x_.size(-1)
961
  scale_factor = int(scale_factor)
962
 
963
  # Calculate seq_sizes from token_grids
@@ -968,17 +940,17 @@ def pixel_shuffle_varlen(
968
  seq_sizes=seq_sizes,
969
  token_grids=token_grids,
970
  scale_factor=scale_factor,
971
- device=x_.device,
972
  ) # (new_seq, scale_factor**2)
973
 
974
  # Gather → (new_seq, scale_factor**2, embed_dim)
975
- gathered = x_[gather_idx] # fancy indexing keeps gradient
976
 
977
  # Merge the scale_factor**2 group dimension into channels to finish the shuffle
978
  out = gathered.reshape(gathered.size(0), embed_dim * scale_factor * scale_factor)
979
 
980
  # Restore batch dimension if needed
981
- if keep_batch_dim:
982
  out = out.unsqueeze(0)
983
  return out
984
 
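For intuition about the gather above: pixel shuffle folds each `scale_factor x scale_factor` neighborhood of the token grid into channels, shrinking the sequence by `scale_factor**2`. A single-image sketch (the packed multi-image path above does the same thing per `token_grids` entry):

import torch

H, W, D, s = 4, 6, 8, 2
tokens = torch.randn(H, W, D)
shuffled = (
    tokens.reshape(H // s, s, W // s, s, D)
    .permute(0, 2, 1, 3, 4)          # group each s x s neighborhood together
    .reshape(H // s, W // s, D * s * s)
)
assert shuffled.shape == (2, 3, 32)  # 4x fewer tokens, 4x wider channels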
@@ -1007,14 +979,14 @@ class IsaacVisionTransformer(nn.Module):
1007
  # Generate cumulative sequence lengths for variable-length attention
1008
  cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32, device=hidden_states.device)
1009
  cu_seqlens[1:] = seq_sizes.cumsum(0)
1010
- max_seqlen = int(seq_sizes.max().item()) if seq_sizes.numel() > 0 else 0
 
1011
 
1012
  # Pass through encoder with variable-length attention parameters
1013
  encoder_outputs = self.encoder(
1014
  inputs_embeds=hidden_states,
 
1015
  cu_seqlens=cu_seqlens,
1016
- max_seqlen=max_seqlen,
1017
- return_dict=True,
1018
  )
1019
  hidden_states = encoder_outputs.last_hidden_state
1020
 
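Numerically, the construction above turns per-image token counts into cumulative offsets:

import torch

seq_sizes = torch.tensor([9, 4, 16])  # tokens per packed image
cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32)
cu_seqlens[1:] = seq_sizes.cumsum(0)
# tensor([ 0,  9, 13, 29], dtype=torch.int32) -- boundaries of each image's token run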
@@ -1033,6 +1005,24 @@ class IsaacVisionTransformer(nn.Module):
1033
  return hidden_states
1034
 
1035
 
1036
  class IsaacVisionEmbedding(nn.Module):
1037
  """Vision embedding wrapper exposing tower and projector."""
1038
 
@@ -1041,14 +1031,9 @@ class IsaacVisionEmbedding(nn.Module):
1041
  def __init__(self, config: IsaacConfig):
1042
  super().__init__()
1043
  vision_cfg = config.vision_config
1044
- hidden_dim = vision_cfg.hidden_size * (vision_cfg.pixel_shuffle_scale_factor**2)
1045
 
1046
  self.vision_tower = IsaacVisionTransformer(vision_cfg)
1047
- self.multimodal_projector = nn.Sequential(
1048
- nn.Linear(hidden_dim, 4 * hidden_dim, bias=False),
1049
- nn.SiLU(),
1050
- nn.Linear(4 * hidden_dim, config.hidden_size, bias=False),
1051
- )
1052
 
1053
  def forward(self, vision_tokens: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
1054
  hidden_states = self.vision_tower(vision_tokens)
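The `nn.Sequential` projector deleted above is a SiLU MLP from the pixel-shuffled vision width up to the text hidden size. An explicit-module equivalent as a sketch; the replacement's real submodule names are not visible in this hunk, so `fc1`/`fc2` below are assumptions:

import torch.nn as nn

class MultimodalProjectorSketch(nn.Module):
    def __init__(self, hidden_dim: int, out_dim: int):
        super().__init__()
        self.fc1 = nn.Linear(hidden_dim, 4 * hidden_dim, bias=False)  # assumed name
        self.act = nn.SiLU()
        self.fc2 = nn.Linear(4 * hidden_dim, out_dim, bias=False)     # assumed name

    def forward(self, x):
        return self.fc2(self.act(self.fc1(x)))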
@@ -1145,31 +1130,6 @@ def get_image_size_for_max_num_patches(
1145
  return target_height, target_width
1146
 
1147
 
1148
- def patchify_vision(image: torch.Tensor, patch_size: int) -> torch.Tensor:
1149
- r"""Convert normalized images into flattened ViT-style patches.
1150
-
1151
- Args:
1152
- image (`torch.Tensor`):
1153
- Tensor of shape `(num_images, height, width, channels)`.
1154
- patch_size (`int`):
1155
- Edge length of the square patches
1156
-
1157
- Returns:
1158
- `torch.Tensor`:
1159
- Patch tensor where each position stores the flattened pixels belonging to that patch.
1160
-
1161
- Raises:
1162
- ValueError: If `height` or `width` is not divisible by `patch_size`.
1163
- """
1164
- num_images, height, width, channels = image.shape
1165
- if height % patch_size or width % patch_size:
1166
- raise ValueError(f"Dimensions of images {image.shape} are not divisible by patch_size={patch_size}.")
1167
- patches = image.reshape(num_images, height // patch_size, patch_size, width // patch_size, patch_size, channels)
1168
- patches = patches.permute(0, 1, 3, 2, 4, 5)
1169
- patches = patches.reshape(num_images, height // patch_size, width // patch_size, channels * patch_size * patch_size)
1170
- return patches
1171
-
1172
-
1173
  class IsaacConfig(PretrainedConfig):
1174
  """Configuration class for Isaac multimodal model.
1175
 
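The reshape/permute pipeline of the deleted `patchify_vision`, checked on a toy NHWC tensor (the same math as above, just with concrete sizes):

import torch

n, h, w, c, p = 1, 4, 4, 3, 2
image = torch.arange(n * h * w * c, dtype=torch.float32).reshape(n, h, w, c)
patches = (
    image.reshape(n, h // p, p, w // p, p, c)
    .permute(0, 1, 3, 2, 4, 5)
    .reshape(n, h // p, w // p, c * p * p)
)
assert patches.shape == (1, 2, 2, 12)  # one flattened 2x2x3 patch per grid cell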
@@ -1190,25 +1150,25 @@ class IsaacConfig(PretrainedConfig):
1190
  vision_token: str = "<image>",
1191
  **kwargs,
1192
  ):
1193
- self._rope_parameters: Optional[dict[str, Any]] = None
1194
  attn_implementation = kwargs.get("attn_implementation")
1195
 
1196
  if isinstance(text_config, dict):
1197
  self.text_config = self.sub_configs["text_config"](**text_config)
 
 
1198
  elif text_config is None:
1199
  self.text_config = self.sub_configs["text_config"]()
1200
 
1201
- super().__init__(**kwargs)
 
 
1202
 
1203
- if self._rope_scaling is None:
1204
- self._rope_scaling = getattr(self.text_config, "rope_scaling", None)
1205
- else:
1206
- self.text_config.rope_scaling = self._rope_scaling
1207
 
1208
- # Keep rope parameters alias in sync with upstream expectations
1209
- self._rope_parameters = self._rope_scaling
1210
 
1211
- # Mirror frequently accessed Qwen3 attributes at the composite config level for BC.
1212
  self.vocab_size = self.text_config.vocab_size
1213
  self.hidden_size = self.text_config.hidden_size
1214
  self.num_hidden_layers = self.text_config.num_hidden_layers
@@ -1216,10 +1176,7 @@ class IsaacConfig(PretrainedConfig):
1216
  self.head_dim = self.text_config.head_dim
1217
  self.hidden_act = self.text_config.hidden_act
1218
  self.use_cache = self.text_config.use_cache
1219
- self.rope_theta = self.text_config.rope_parameters["rope_theta"]
1220
-
1221
- # Validate rotary parameters now that they have been mirrored locally.
1222
- rope_config_validation(self)
1223
 
1224
  self.layer_types = getattr(self.text_config, "layer_types", None)
1225
  layer_type_validation(self.layer_types, self.num_hidden_layers)
@@ -1248,33 +1205,6 @@ class IsaacConfig(PretrainedConfig):
1248
  self.max_sequence_length = max_sequence_length
1249
  self.vision_token = vision_token
1250
 
1251
- @property
1252
- def rope_scaling(self):
1253
- if hasattr(self, "text_config") and self.text_config is not None:
1254
- return getattr(self.text_config, "rope_scaling", None)
1255
- return self._rope_scaling
1256
-
1257
- @rope_scaling.setter
1258
- def rope_scaling(self, value):
1259
- self._rope_scaling = value
1260
- if hasattr(self, "text_config") and self.text_config is not None:
1261
- self.text_config.rope_scaling = value
1262
-
1263
- @property
1264
- def rope_parameters(self) -> dict[str, Any] | None:
1265
- """Alias introduced upstream for rope scaling dictionaries."""
1266
- value = self._rope_parameters
1267
- if value is None:
1268
- value = self.rope_scaling
1269
- if value is None:
1270
- return {"rope_type": "default"}
1271
- return value
1272
-
1273
- @rope_parameters.setter
1274
- def rope_parameters(self, value: dict[str, Any] | None) -> None:
1275
- self._rope_parameters = value
1276
- self.rope_scaling = value
1277
-
1278
  def to_dict(self):
1279
  output = super().to_dict()
1280
  # Ensure nested configs round-trip through dict serialization
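The properties deleted above kept `rope_parameters` and `rope_scaling` pointing at the same dictionary; a stripped-down sketch of that aliasing pattern (a demo class, not the config itself):

class RopeAliasDemo:
    def __init__(self):
        self._rope_scaling = None

    @property
    def rope_parameters(self):
        return self._rope_scaling or {"rope_type": "default"}

    @rope_parameters.setter
    def rope_parameters(self, value):
        self._rope_scaling = value

cfg = RopeAliasDemo()
assert cfg.rope_parameters == {"rope_type": "default"}
cfg.rope_parameters = {"rope_type": "default", "rope_theta": 1000000}
assert cfg.rope_parameters["rope_theta"] == 1000000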
@@ -1336,7 +1266,7 @@ def create_text_event(tokenizer: AutoTokenizer, text: str, time: float = 0.0) ->
1336
  class IsaacProcessor(ProcessorMixin):
1337
  attributes = ["image_processor", "tokenizer"]
1338
  image_processor_class = ("IsaacImageProcessorFast",)
1339
- tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
1340
 
1341
  def __init__(
1342
  self,
@@ -1516,12 +1446,10 @@ def compute_position_ids_input_ids(input_ids: torch.Tensor) -> torch.Tensor:
1516
  return position_ids
1517
 
1518
 
1519
- class IsaacRotaryEmbedding(nn.Module):
1520
  EXTRA_ROPE_KEYS = {"mrope_section", "mrope_interleaved"}
1521
 
1522
  def __init__(self, config: IsaacConfig, device=None):
1523
- super().__init__()
1524
-
1525
  rope_source_cfg = config.get_text_config() if hasattr(config, "get_text_config") else config
1526
  rope_scaling = getattr(rope_source_cfg, "rope_scaling", None) or {}
1527
 
@@ -1530,9 +1458,9 @@ class IsaacRotaryEmbedding(nn.Module):
1530
  config_for_rope.rope_scaling = sanitized_scaling if sanitized_scaling else None
1531
 
1532
  init_device = device if device is not None and getattr(device, "type", None) != "meta" else None
1533
- self._qwen_rotary = qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding(config_for_rope, device=init_device)
1534
 
1535
- rotary_half_dim = self._qwen_rotary.inv_freq.shape[0]
1536
  self.mrope_section = self._resolve_mrope_section(rope_scaling.get("mrope_section"), rotary_half_dim)
1537
  self.hidden_size = getattr(rope_source_cfg, "hidden_size", None) or config.hidden_size
1538
 
@@ -1558,10 +1486,6 @@ class IsaacRotaryEmbedding(nn.Module):
1558
  chunks = tensor.split(split_sections, dim=-1)
1559
  return torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
1560
 
1561
- @property
1562
- def inv_freq(self) -> torch.Tensor:
1563
- return self._qwen_rotary.inv_freq
1564
-
1565
  def forward(
1566
  self,
1567
  position_ids: torch.Tensor,
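The `chunk[i % 3]` pattern a few lines up interleaves the three MRoPE axes (t, h, w) along the rotary dimension: each split chunk stacks all three axes in dim 0, and successive chunks take them round-robin. A toy trace of that exact pattern:

import torch

mrope_section = [2, 3, 3]  # sums to the rotary half-dim (8 here)
axes = torch.stack([torch.full((8,), float(a)) for a in (0, 1, 2)])  # (3, 8): t/h/w
chunks = axes.split(mrope_section, dim=-1)
out = torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
print(out)  # tensor([0., 0., 1., 1., 1., 2., 2., 2.])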
@@ -1593,7 +1517,7 @@ class IsaacRotaryEmbedding(nn.Module):
1593
 
1594
  pos_axes = pos.permute(2, 0, 1).contiguous()
1595
 
1596
- cos_axes, sin_axes = self._qwen_rotary(hidden_states, pos_axes)
1597
 
1598
  cos_axes = cos_axes.to(hidden_states.dtype)
1599
  sin_axes = sin_axes.to(hidden_states.dtype)
@@ -1608,6 +1532,7 @@ class IsaacModel(Qwen3PreTrainedModel):
1608
  supports_gradient_checkpointing = True
1609
  _can_compile_fullgraph = False
1610
  _supports_flex_attn = False
 
1611
  # Expose tied-weights mapping even if empty for base model tests.
1612
  all_tied_weights_keys: dict[str, str] = {}
1613
 
@@ -1667,12 +1592,8 @@ class IsaacModel(Qwen3PreTrainedModel):
1667
  self.text_model.embed_tokens = value
1668
 
1669
  @property
1670
- def layers(self) -> nn.ModuleList:
1671
- return self.text_model.layers
1672
-
1673
- @property
1674
- def norm(self) -> nn.Module:
1675
- return self.text_model.norm
1676
 
1677
  @property
1678
  def vision_model(self) -> nn.Module:
@@ -1729,6 +1650,62 @@ class IsaacModel(Qwen3PreTrainedModel):
1729
  h = embedded_ts.compact() # (B, T, D)
1730
  return h
1731
 
1732
  @auto_docstring
1733
  @check_model_inputs
1734
  def forward(
@@ -1741,11 +1718,8 @@ class IsaacModel(Qwen3PreTrainedModel):
1741
  past_key_values: Optional[list[torch.FloatTensor]] = None,
1742
  inputs_embeds: Optional[torch.FloatTensor] = None,
1743
  use_cache: Optional[bool] = None,
1744
- output_attentions: Optional[bool] = None,
1745
- output_hidden_states: Optional[bool] = None,
1746
- return_dict: Optional[bool] = None,
1747
  cache_position: Optional[torch.LongTensor] = None,
1748
- **kwargs,
1749
  ) -> tuple | BaseModelOutputWithPast:
1750
  """
1751
  Forward pass with MRoPE position embeddings.
@@ -1763,122 +1737,56 @@ class IsaacModel(Qwen3PreTrainedModel):
1763
  omitted.
1764
  """
1765
 
1766
- if modality_tensor is not None:
1767
- modality_tensor = modality_tensor.to(dtype=torch.long)
1768
- text_value = TextType.text.value if TextType is not None else 0
1769
 
1770
  # Get inputs
1771
-
1772
  if tensor_stream is not None and inputs_embeds is not None:
1773
  raise ValueError("You cannot specify both tensor_stream and inputs_embeds")
1774
- elif tensor_stream is not None:
1775
- # Embed TensorStream directly
1776
- inputs_embeds = self.embed_stream(tensor_stream)
1777
- # Create modality tensor if not provided
1778
- if modality_tensor is None:
1779
- modality_tensor = modality_mask(tensor_stream)
1780
- elif input_ids is not None and inputs_embeds is not None:
1781
  raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1782
  elif input_ids is not None:
1783
  inputs_embeds = self.text_model.embed_tokens(input_ids)
1784
- # Create text modality tensor if not provided
1785
- if modality_tensor is None:
1786
- batch_size, seq_length = input_ids.shape
1787
- modality_tensor = torch.full(
1788
- (batch_size, seq_length), text_value, device=input_ids.device, dtype=torch.long
1789
- )
1790
- elif inputs_embeds is not None:
1791
- # Inputs provided directly as embeddings (no input_ids/tensor_stream)
1792
- if modality_tensor is None:
1793
- batch_size, seq_length = inputs_embeds.shape[:2]
1794
- modality_tensor = torch.full(
1795
- (batch_size, seq_length), text_value, device=inputs_embeds.device, dtype=torch.long
1796
- )
1797
- if attention_mask is None:
1798
- attention_mask = torch.ones(
1799
- (inputs_embeds.shape[0], inputs_embeds.shape[1]), device=inputs_embeds.device, dtype=torch.long
1800
- )
1801
- else:
1802
  raise ValueError("You have to specify either tensor_stream, input_ids or inputs_embeds")
1803
 
 
 
1804
  # Ensure cache exists when requested
1805
  if use_cache and past_key_values is None:
1806
  cache_config = self.config.get_text_config() if hasattr(self.config, "get_text_config") else self.config
1807
  past_key_values = DynamicCache(config=cache_config)
1808
 
1809
- if cache_position is None and (past_key_values is not None or use_cache):
1810
  past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1811
- cache_position = torch.arange(
1812
- past_seen_tokens,
1813
- past_seen_tokens + inputs_embeds.shape[1],
1814
- device=inputs_embeds.device,
1815
- )
1816
-
1817
- # Create default position_ids if not provided
1818
- if position_ids is None:
1819
- if tensor_stream is not None:
1820
- position_ids = compute_mrope_pos_tensor(tensor_stream) # (B,L,3)
1821
- elif cache_position is not None:
1822
- batch_size = modality_tensor.shape[0] if modality_tensor is not None else inputs_embeds.shape[0]
1823
- position_ids = cache_position.view(1, -1).expand(batch_size, -1)
1824
- elif input_ids is not None:
1825
- position_ids = compute_position_ids_input_ids(input_ids)
1826
- else:
1827
- batch_size, seq_length = inputs_embeds.shape[:2]
1828
- dummy_ids = torch.zeros((batch_size, seq_length), device=inputs_embeds.device, dtype=torch.long)
1829
- position_ids = compute_position_ids_input_ids(dummy_ids)
1830
 
1831
  if attention_mask is None:
1832
- attention_mask = torch.ones(
1833
- (inputs_embeds.shape[0], inputs_embeds.shape[1]), device=inputs_embeds.device, dtype=torch.long
1834
- )
1835
 
1836
- # Expand 2D position ids (from generic padding tests or decode cache positions) to 3D MRoPE coords
1837
- if position_ids is not None and position_ids.ndim == 2:
1838
- position_ids = position_ids.to(device=inputs_embeds.device)
1839
- position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)
1840
-
1841
- # Align lengths so rotary embedding sees matching shapes
1842
- seq_len = inputs_embeds.shape[1]
1843
- if position_ids is not None and position_ids.shape[1] != seq_len:
1844
- start_positions = position_ids[:, :1, 0]
1845
- position_ids = torch.arange(seq_len, device=inputs_embeds.device).view(1, -1)
1846
- position_ids = position_ids + start_positions
1847
- position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)
1848
-
1849
- if modality_tensor.shape[1] != seq_len:
1850
- if modality_tensor.shape[1] > seq_len:
1851
- modality_tensor = modality_tensor[:, :seq_len]
1852
- else:
1853
- pad = modality_tensor[:, -1:].expand(-1, seq_len - modality_tensor.shape[1])
1854
- modality_tensor = torch.cat([modality_tensor, pad], dim=1)
1855
-
1856
- # Compute MRoPE position embeddings if we have custom rotary_emb
1857
- cos, sin = self.rotary_emb(
1858
- position_ids,
1859
- modality_tensor,
1860
- hidden_states=inputs_embeds,
1861
  )
1862
- cos = cos.to(inputs_embeds.dtype)
1863
- sin = sin.to(inputs_embeds.dtype)
1864
-
1865
- # Flash attention expects 1D position_ids; keep 3D only for rotary phases
1866
- decoder_position_ids = position_ids
1867
- if position_ids is not None and position_ids.ndim == 3:
1868
- decoder_position_ids = position_ids[..., 0]
1869
 
1870
  # Prepare attention mask
1871
-
1872
  if not isinstance(attention_mask, dict):
1873
- mask_kwargs = {
1874
- "config": self.config,
1875
- "input_embeds": inputs_embeds,
1876
- "attention_mask": attention_mask,
1877
- "cache_position": cache_position,
1878
- "past_key_values": past_key_values,
1879
- "position_ids": decoder_position_ids,
1880
- }
1881
- attention_mask = create_masks_for_generate(**mask_kwargs)
 
1882
 
1883
  # Initialize hidden states
1884
  hidden_states = inputs_embeds
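Two removed steps above are worth pinning down numerically: decode-time cache positions are a plain arange past the cached prefix, and 2D position ids get broadcast across the three MRoPE axes (toy values):

import torch

past_seen_tokens, new_tokens = 12, 1
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)  # tensor([12])
position_ids = cache_position.view(1, -1)                    # (B=1, L=1)
position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)  # (1, 1, 3): same index on t/h/w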
@@ -1886,7 +1794,7 @@ class IsaacModel(Qwen3PreTrainedModel):
1886
 
1887
  for decoder_layer in self.text_model.layers:
1888
  layer_attention_mask = (
1889
- attention_mask[decoder_layer.attention_type] if isinstance(attention_mask, dict) else attention_mask
1890
  )
1891
  layer_outputs = decoder_layer(
1892
  hidden_states,
@@ -1900,12 +1808,10 @@ class IsaacModel(Qwen3PreTrainedModel):
1900
  **kwargs,
1901
  )
1902
 
1903
- if isinstance(layer_outputs, tuple):
1904
- hidden_states = layer_outputs[0]
1905
- if output_attentions:
1906
- all_attentions.append(layer_outputs[1])
1907
- else:
1908
- hidden_states = layer_outputs
1909
 
1910
  # Final layer norm
1911
  hidden_states = self.text_model.norm(hidden_states)
@@ -1926,19 +1832,6 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
1926
  _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"}
1927
  all_tied_weights_keys: dict[str, str] = {"lm_head.weight": "model.text_model.embed_tokens.weight"}
1928
 
1929
- def set_input_embeddings(self, value: nn.Module) -> None:
1930
- self.model.set_input_embeddings(value)
1931
- vocab_size = getattr(value, "num_embeddings", None)
1932
- if vocab_size is not None:
1933
- self.config.vocab_size = vocab_size
1934
- self.model.config.vocab_size = vocab_size
1935
- if hasattr(self.model, "text_model"):
1936
- self.model.text_model.config.vocab_size = vocab_size
1937
- if self.lm_head.weight.shape[0] != vocab_size:
1938
- self.lm_head = nn.Linear(self.config.hidden_size, vocab_size, bias=False)
1939
- if hasattr(self.model, "embed_tokens"):
1940
- self.lm_head.weight = self.model.text_model.embed_tokens.weight
1941
-
1942
  def __init__(self, config: IsaacConfig):
1943
  super().__init__(config)
1944
  self.model = IsaacModel(config) # Use our custom model
@@ -1947,39 +1840,6 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
1947
  # Tracks rotary position offsets computed during a full forward pass so decode steps can reuse them.
1948
  self.rope_deltas = None
1949
 
1950
- def get_rope_index(
1951
- self,
1952
- input_ids: Optional[torch.Tensor],
1953
- tensor_stream: Optional[TensorStream],
1954
- attention_mask: Optional[torch.Tensor],
1955
- ) -> tuple[torch.Tensor, torch.Tensor]:
1956
- """Compute MRoPE position ids from a TensorStream (or 1D fallback).
1957
-
1958
- Returns (position_ids, rope_deltas). position_ids is (B,L,3) for MRoPE.
1959
- rope_deltas is (B,1) used to advance positions in decode.
1960
- """
1961
- # tensor_stream present: compute 3D coords
1962
- if tensor_stream is None and input_ids is None:
1963
- raise ValueError("`tensor_stream` or `input_ids` must be provided to compute rope indices")
1964
-
1965
- if tensor_stream is not None:
1966
- pos_3d = compute_mrope_pos_tensor(tensor_stream) # (B,L,3)
1967
- else:
1968
- pos_3d = compute_position_ids_input_ids(input_ids)
1969
- B, L, _ = pos_3d.shape
1970
-
1971
- # Max position per batch across the 3 planes and sequence dimension: (B,)
1972
- m_per_batch = pos_3d.amax(dim=(1, 2))
1973
-
1974
- # Sequence lengths per batch: (B,)
1975
- if attention_mask is None:
1976
- seq_lens = torch.full_like(m_per_batch, L)
1977
- else:
1978
- seq_lens = attention_mask.eq(1).sum(dim=-1).to(dtype=m_per_batch.dtype, device=m_per_batch.device)
1979
-
1980
- rope_deltas = (m_per_batch + 1 - seq_lens).to(dtype=pos_3d.dtype).unsqueeze(1)
1981
- return pos_3d, rope_deltas
1982
-
1983
  def forward(
1984
  self,
1985
  input_ids: Optional[torch.LongTensor] = None,
@@ -1990,11 +1850,8 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
1990
  inputs_embeds: Optional[torch.FloatTensor] = None,
1991
  labels: Optional[torch.LongTensor] = None,
1992
  use_cache: Optional[bool] = None,
1993
- output_attentions: Optional[bool] = None,
1994
- output_hidden_states: Optional[bool] = None,
1995
- return_dict: Optional[bool] = None,
1996
  cache_position: Optional[torch.LongTensor] = None,
1997
- **kwargs,
1998
  ) -> tuple | CausalLMOutputWithPast:
1999
  r"""
2000
  Forward pass for conditional generation supporting both standard inputs and TensorStream.
@@ -2005,70 +1862,43 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
2005
  `input_ids`.
2006
  """
2007
 
2008
- # Don't compute embeddings here - let the model handle it
2009
  if tensor_stream is not None:
2010
  input_ids = None
2011
  if input_ids is None and inputs_embeds is None and tensor_stream is None:
2012
  raise ValueError("Either input_ids, inputs_embeds, or tensor_stream must be provided.")
2013
 
2014
- # Build position ids (MRoPE) if needed and tensor_stream is available
2015
- # During decode we reuse `self.rope_deltas` computed on the initial forward pass; `rope_delta` captures how far
2016
- # cached rotary phases have progressed so we can advance `position_ids` without rebuilding the TensorStream.
2017
  if position_ids is None and tensor_stream is not None:
2018
  position_ids, self.rope_deltas = self.get_rope_index(input_ids, tensor_stream, attention_mask)
2019
- elif position_ids is None and input_ids is not None:
2020
- # For text inputs build position ids and modality tensor
2021
- position_ids = compute_position_ids_input_ids(input_ids)
2022
- if cache_position is not None and self.rope_deltas is not None:
2023
- # Combine the incremental decode step (`cache_position`) with cached offsets so hidden states continue
2024
- # rotating in lockstep across generation steps.
2025
- rope_delta = (cache_position[0] + self.rope_deltas).to(input_ids.device)
2026
- else:
2027
- rope_delta = 0
2028
- if cache_position is not None and not isinstance(rope_delta, int): # otherwise `deltas` is an int `0`
2029
- batch_size = input_ids.shape[0]
2030
- rope_delta = rope_delta.repeat_interleave(batch_size // rope_delta.shape[0], dim=0)
2031
- position_ids = position_ids.add(rope_delta)
2032
- elif position_ids is None and inputs_embeds is not None:
2033
- batch_size, seq_len = inputs_embeds.shape[:2]
2034
- dummy_ids = torch.zeros((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long)
2035
- position_ids = compute_position_ids_input_ids(dummy_ids)
2036
-
2037
- if attention_mask is None:
2038
  if input_ids is not None:
2039
- batch_size, seq_len = input_ids.shape
2040
- attention_mask = torch.ones((batch_size, seq_len), device=input_ids.device, dtype=torch.long)
2041
- elif inputs_embeds is not None:
 
2042
  batch_size, seq_len = inputs_embeds.shape[:2]
2043
- attention_mask = torch.ones((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long)
 
2044
 
2045
- text_value = TextType.text.value if TextType is not None else 0
2046
-
2047
- if tensor_stream is not None:
2048
- modality_tensor = modality_mask(tensor_stream)
2049
- elif input_ids is not None:
2050
- batch_size, seq_len = input_ids.shape
2051
- modality_tensor = torch.full(
2052
- (batch_size, seq_len), text_value, device=position_ids.device, dtype=torch.long
2053
- )
2054
- else:
2055
- batch_size, seq_len = inputs_embeds.shape[:2]
2056
- modality_tensor = torch.full(
2057
- (batch_size, seq_len), text_value, device=position_ids.device, dtype=torch.long
2058
- )
2059
 
2060
  outputs = self.model(
2061
  input_ids=input_ids,
2062
  tensor_stream=tensor_stream,
2063
  attention_mask=attention_mask,
2064
  position_ids=position_ids,
2065
- modality_tensor=modality_tensor,
2066
  past_key_values=past_key_values,
2067
  inputs_embeds=inputs_embeds,
2068
  use_cache=use_cache,
2069
  output_attentions=output_attentions,
2070
- output_hidden_states=output_hidden_states,
2071
- return_dict=return_dict,
2072
  cache_position=cache_position,
2073
  **kwargs,
2074
  )
@@ -2088,6 +1918,52 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
2088
  attentions=outputs.attentions if output_attentions else None,
2089
  )
2090
 
 
2091
  def prepare_inputs_for_generation(
2092
  self,
2093
  input_ids: torch.LongTensor,
@@ -2143,22 +2019,11 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
2143
  else:
2144
  model_inputs["tensor_stream"] = None
2145
 
2146
- # TensorStream decode path: preserve rotary offsets from prefill
2147
  if tensor_stream is not None and not first_step and self.rope_deltas is not None:
2148
  model_inputs["position_ids"] = None
2149
  return model_inputs
2150
 
2151
- # For decode steps, synthesize position_ids that continue from the cache offsets
2152
- if model_inputs.get("position_ids") is None and cache_position is not None and not first_step:
2153
- batch_size = 1
2154
- if model_inputs.get("input_ids") is not None:
2155
- batch_size = model_inputs["input_ids"].shape[0]
2156
- elif model_inputs.get("inputs_embeds") is not None:
2157
- batch_size = model_inputs["inputs_embeds"].shape[0]
2158
- pos_ids = cache_position.view(1, -1).expand(batch_size, -1)
2159
- pos_ids = pos_ids.unsqueeze(-1).expand(-1, -1, 3)
2160
- model_inputs["position_ids"] = pos_ids
2161
-
2162
  return model_inputs
2163
 
2164
  @classmethod
@@ -2166,13 +2031,6 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
2166
  return True
2167
 
2168
 
2169
- AutoImageProcessor.register(
2170
- IsaacConfig,
2171
- fast_image_processor_class=IsaacImageProcessorFast,
2172
- exist_ok=True,
2173
- )
2174
-
2175
-
2176
  def _compute_residual_p_frames(frames: torch.Tensor, is_p_frame: list[bool]) -> torch.Tensor:
2177
  """Compute residuals for P-frames to stay in sync with the training pipeline."""
2178
  if not any(is_p_frame):
 
117
  PILImageResampling,
118
  )
119
  from transformers.modeling_attn_mask_utils import AttentionMaskConverter
120
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, BaseModelOutput
121
  from transformers.modeling_rope_utils import rope_config_validation
122
  from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
123
  from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
 
130
  Siglip2EncoderLayer,
131
  Siglip2VisionEmbeddings,
132
  )
133
+ from transformers.masking_utils import (
134
+ ALL_MASK_ATTENTION_FUNCTIONS,
135
+ create_masks_for_generate,
136
+ eager_mask,
137
+ packed_sequence_mask_function,
138
+ sdpa_mask,
139
+ )
140
  from transformers.processing_utils import ImagesKwargs, ProcessorMixin, Unpack
141
  from transformers.utils import auto_docstring, TensorType
142
+ from transformers.utils.generic import OutputRecorder, can_return_tuple, check_model_inputs
143
+ from transformers.models.pix2struct.image_processing_pix2struct_fast import torch_extract_patches
144
 
145
  # Vision preprocessing constants
146
  from transformers.utils.constants import IMAGENET_STANDARD_MEAN as VISION_MEAN
 
148
  from transformers.utils.import_utils import is_torchdynamo_compiling
149
 
150
  try:
151
+ from genesis.public.tensorstream.tensor_stream import (
152
+ Event,
153
+ Stream,
154
+ TensorStream,
155
+ TextType,
156
+ VisionType,
157
+ create_stream,
158
+ group_streams,
159
+ )
160
+ from genesis.public.tensorstream.tensor_stream_utils import (
161
+ compute_mrope_pos_tensor,
162
+ modality_mask,
163
+ reconstruct_tensor_stream_from_compact_dict,
164
+ tensor_stream_token_view,
165
+ )
166
+ from genesis.public.tensorstream.tensor_stream_utils import (
167
+ slice as ts_slice,
168
+ )
169
  except ModuleNotFoundError as exc: # pragma: no cover - import guard
170
  raise ModuleNotFoundError(
171
  "genesis.public.tensorstream is required for the Isaac HuggingFace integration. "
 
229
  self._attn_implementation = "sdpa"
230
 
231
 
232
+ class IsaacImageProcessorFastKwargs(ImagesKwargs, total=False):
233
  patch_size: Optional[int]
234
  max_num_patches: Optional[int]
235
  min_num_patches: Optional[int]
 
243
 
244
  resample = PILImageResampling.BILINEAR
245
  model_input_names = ["patches", "token_grids"]
246
+ valid_kwargs = IsaacImageProcessorFastKwargs
247
  unused_kwargs = ["size", "do_center_crop", "crop_size"]
248
 
249
  do_resize = True
250
  do_center_crop = False
 
251
  patch_size: Optional[int] = 16
252
  max_num_patches: Optional[int] = 256
253
  min_num_patches: Optional[int] = None
254
  pixel_shuffle_scale: Optional[int] = 1
255
  do_pad = False
 
256
  do_rescale = True
 
257
  do_normalize = True
258
  image_mean = list(VISION_MEAN)
259
  image_std = list(VISION_STD)
260
  do_convert_rgb = True
261
  disable_grouping = False
262
  size_divisor: Optional[int] = None
263
 
264
  def __init__(
265
  self,
266
+ **kwargs: Unpack[IsaacImageProcessorFastKwargs],
267
  ) -> None:
268
  super().__init__(**kwargs)
269
 
 
399
  nhwc_images = image_batch.permute(0, 2, 3, 1)
400
  nhwc_images = _compute_residual_p_frames(nhwc_images, is_p_frame=[False] * batch_size)
401
 
402
+ patches = torch_extract_patches(nhwc_images.permute(0, 3, 1, 2), patch_size, patch_size)
403
  _, height_tokens, width_tokens, _ = patches.shape
404
 
405
  token_grid = (
 
488
  return packed_sequence_mask_function(packed_sequence_mask)
489
 
490
 
491
+ def create_document_attention_mask(
492
+ config: PretrainedConfig,
493
+ input_embeds: torch.Tensor,
494
  cu_seqlens: Optional[torch.Tensor],
495
+ ) -> Optional[Union[torch.Tensor, Any]]:
496
+ """Materialize a backend-specific block-diagonal attention mask.
497
 
498
+ This uses the standard `masking_utils` mask interface (same mechanism as Llama4),
499
+ so the returned object matches the selected attention backend (e.g. SDPA bool mask,
500
+ eager additive mask, or flex `BlockMask`).
501
+ """
502
 
503
+ mask_function = document_mask_function_from_cu_seqlens(cu_seqlens)
504
+ if mask_function is None:
505
  return None
506
 
507
+ seq_len = input_embeds.shape[1]
508
+ cache_position = torch.arange(seq_len, device=input_embeds.device, dtype=torch.long)
509
+
510
+ mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
511
+ return mask_interface(
512
+ batch_size=input_embeds.shape[0],
513
+ cache_position=cache_position,
514
+ kv_length=seq_len,
515
+ kv_offset=0,
516
+ mask_function=mask_function,
517
+ attention_mask=None,
518
+ allow_is_causal_skip=False,
519
+ allow_is_bidirectional_skip=False,
520
+ dtype=input_embeds.dtype,
521
+ config=config,
522
+ use_vmap=False,
523
+ )
524
 
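For intuition, a minimal sketch of the block-diagonal structure this helper materializes, built densely by hand from cu_seqlens (illustrative only; the helper name dense_document_mask is hypothetical, and the real code above delegates to the registered mask interface for the active backend):

import torch

def dense_document_mask(cu_seqlens: torch.Tensor) -> torch.Tensor:
    # Label every packed position with its document id, then allow attention
    # only between positions that share a document.
    seq_len = int(cu_seqlens[-1])
    doc_ids = torch.zeros(seq_len, dtype=torch.long)
    for i in range(cu_seqlens.numel() - 1):
        doc_ids[cu_seqlens[i]:cu_seqlens[i + 1]] = i
    return doc_ids[:, None] == doc_ids[None, :]

mask = dense_document_mask(torch.tensor([0, 3, 7]))  # two docs of lengths 3 and 4
assert mask.shape == (7, 7) and not mask[0, 3]       # doc 0 cannot see doc 1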
525
 
526
  class IsaacVisionEmbeddings(nn.Module):
 
678
  self,
679
  hidden_states: torch.Tensor,
680
  attention_mask: Optional[torch.Tensor] = None,
681
  output_attentions: bool = False,
 
682
  cu_seqlens: Optional[torch.Tensor] = None,
683
  max_seqlen: Optional[int] = None,
684
  **kwargs,
685
  ):
 
  kwargs.pop("output_hidden_states", None)
687
  kwargs.pop("return_dict", None)
688
 
 
695
  keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
696
  values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
697
 
698
+ attn_impl = self.config._attn_implementation
699
  attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"]
700
+ if attn_impl != "sdpa":
701
+ attention_interface = ALL_ATTENTION_FUNCTIONS[attn_impl]
702
 
703
  dropout = 0.0 if not self.training else self.dropout
704
  attention_kwargs: dict[str, Any] = {
 
706
  "scaling": self.scale,
707
  "dropout": dropout,
708
  }
709
+
710
+ supports_varlen = cu_seqlens is not None and attn_impl in {
711
+ "flash_attention_2",
712
+ "flash_attention_3",
713
+ "flex_attention",
714
+ "paged|flash_attention_2",
715
+ "paged|flash_attention_3",
716
+ }
717
+
718
+ if output_attentions and attn_impl == "eager":
719
  attention_kwargs["output_attentions"] = True
720
 
721
+ if supports_varlen:
722
+ if max_seqlen is not None:
723
+ max_q = max_k = int(max_seqlen)
724
+ elif cu_seqlens.numel() >= 2:
725
+ lengths = cu_seqlens[1:] - cu_seqlens[:-1]
726
+ max_q = max_k = int(lengths.max()) if lengths.numel() > 0 else seq_length
727
+ else:
728
+ max_q = max_k = seq_length
729
+
730
+ attention_kwargs.update(
731
+ {
732
+ "cu_seq_lens_q": cu_seqlens,
733
+ "cu_seq_lens_k": cu_seqlens,
734
+ "max_length_q": max_q,
735
+ "max_length_k": max_k,
736
+ }
737
+ )
738
+
739
  attn_output, attn_weights = attention_interface(
740
  self,
741
  queries,
 
758
 
759
  return attn_output, attn_weights
760
 
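A toy illustration of the varlen metadata above (numbers made up): cu_seqlens stores cumulative packed-document boundaries, from which per-document lengths and max_seqlen are recovered exactly as in the max_q/max_k branch.

import torch

cu_seqlens = torch.tensor([0, 5, 12, 20], dtype=torch.int32)  # three packed docs
lengths = cu_seqlens[1:] - cu_seqlens[:-1]                    # tensor([5, 7, 8])
max_seqlen = int(lengths.max())                               # 8
assert int(cu_seqlens[-1]) == int(lengths.sum())              # 20 packed positions total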
761
 
762
  class IsaacVisionEncoderLayer(Siglip2EncoderLayer):
763
  """Isaac vision encoder layer with variable-length attention."""
 
783
  Maximum document length referenced by `cu_seqlens`. Passed to FlashAttention so it can size temporary
784
  buffers for packed variable-length attention.
785
  """
786
  # Run attention directly so variable-length metadata reaches FlashAttention.
787
  residual = hidden_states
788
  hidden_states = self.layer_norm1(hidden_states)
789
+ attn_output, _ = self.self_attn(
790
  hidden_states,
791
  attention_mask=attention_mask,
792
  cu_seqlens=cu_seqlens,
793
  max_seqlen=max_seqlen,
 
794
  **kwargs,
795
  )
796
  hidden_states = residual + attn_output
797
 
798
  residual = hidden_states
 
800
  hidden_states = self.mlp(hidden_states)
801
  hidden_states = residual + hidden_states
802
 
803
  return hidden_states
804
 
805
 
 
811
  self.layers = nn.ModuleList([IsaacVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
812
 
813
  @can_return_tuple
814
+ @check_model_inputs
815
  def forward(
816
  self,
817
  inputs_embeds,
818
  attention_mask: Optional[torch.Tensor] = None,
819
  **kwargs: Unpack[TransformersKwargs],
820
  ):
821
+ hidden_states = inputs_embeds
822
+ for encoder_layer in self.layers:
823
+ hidden_states = encoder_layer(
824
+ hidden_states,
825
+ attention_mask,
826
+ **kwargs,
827
+ )
828
+ return BaseModelOutput(last_hidden_state=hidden_states)
829
 
830
 
831
  def create_pixel_shuffle_index_map(
 
921
  Raises:
922
  ValueError: If more than one batch item is provided.
923
  """
924
+ return_with_batch_dim = x.dim() == 3
925
+ if return_with_batch_dim:
926
  if x.size(0) != 1:
927
  raise AssertionError("Packed sequence is expected to have batch_size == 1")
928
+ embeddings = x.squeeze(0) # (seq, embed)
929
  else:
930
+ embeddings = x # (seq, embed)
931
 
932
+ embed_dim = embeddings.size(-1)
933
  scale_factor = int(scale_factor)
934
 
935
  # Calculate seq_sizes from token_grids
 
940
  seq_sizes=seq_sizes,
941
  token_grids=token_grids,
942
  scale_factor=scale_factor,
943
+ device=embeddings.device,
944
  ) # (new_seq, scale_factor**2)
945
 
946
  # Gather → (new_seq, scale_factor**2, embed_dim)
947
+ gathered = embeddings[gather_idx] # fancy indexing keeps gradient
948
 
949
  # Merge the scale_factor**2 group dimension into channels to finish the shuffle
950
  out = gathered.reshape(gathered.size(0), embed_dim * scale_factor * scale_factor)
951
 
952
  # Restore batch dimension if needed
953
+ if return_with_batch_dim:
954
  out = out.unsqueeze(0)
955
  return out
956
 
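A minimal sketch of what the pixel-shuffle gather accomplishes, using a plain view/permute on one square grid (the real code builds a gather index so the same shuffle works across packed, variably-sized grids; shapes here are illustrative):

import torch

seq, embed_dim, s = 16, 8, 2              # a 4x4 token grid, scale_factor 2
x = torch.randn(seq, embed_dim)
grid = x.view(4, 4, embed_dim)
# Group each 2x2 neighborhood and fold it into the channel dimension.
blocks = grid.view(2, s, 2, s, embed_dim).permute(0, 2, 1, 3, 4)
out = blocks.reshape(seq // (s * s), embed_dim * s * s)
assert out.shape == (4, 32)               # 4x fewer tokens, 4x wider channels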
 
979
  # Generate cumulative sequence lengths for variable-length attention
980
  cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32, device=hidden_states.device)
981
  cu_seqlens[1:] = seq_sizes.cumsum(0)
982
+
983
+ attention_mask = create_document_attention_mask(self.config, hidden_states, cu_seqlens)
984
 
985
  # Pass through encoder with variable-length attention parameters
986
  encoder_outputs = self.encoder(
987
  inputs_embeds=hidden_states,
988
+ attention_mask=attention_mask,
989
  cu_seqlens=cu_seqlens,
 
  )
991
  hidden_states = encoder_outputs.last_hidden_state
992
 
 
1005
  return hidden_states
1006
 
1007
 
1008
+ class IsaacMultiModalProjector(nn.Module):
1009
+ def __init__(self, config: IsaacConfig):
1010
+ super().__init__()
1011
+ self.vision_hidden_size = config.vision_config.hidden_size * (
1012
+ config.vision_config.pixel_shuffle_scale_factor**2
1013
+ )
1014
+ self.backbone_hidden_size = config.hidden_size
1015
+ self.linear_1 = nn.Linear(self.vision_hidden_size, 4 * self.vision_hidden_size, bias=False)
1016
+ self.silu = nn.SiLU()
1017
+ self.linear_2 = nn.Linear(4 * self.vision_hidden_size, self.backbone_hidden_size, bias=False)
1018
+
1019
+ def forward(self, image_features):
1020
+ hidden_states = self.linear_1(image_features)
1021
+ hidden_states = self.silu(hidden_states)
1022
+ hidden_states = self.linear_2(hidden_states)
1023
+ return hidden_states
1024
+
1025
+
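Shape arithmetic for this projector with illustrative numbers (a 1152-dim vision tower and a 2048-dim backbone are assumptions, not values read from this checkpoint):

vision_hidden = 1152 * 2**2        # 4608 after pixel shuffle (scale factor 2)
intermediate = 4 * vision_hidden   # 18432, the linear_1 output width
backbone_hidden = 2048             # assumed text hidden_size
# linear_1: 4608 -> 18432, SiLU, linear_2: 18432 -> 2048, both bias-free.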
1026
  class IsaacVisionEmbedding(nn.Module):
1027
  """Vision embedding wrapper exposing tower and projector."""
1028
 
 
1031
  def __init__(self, config: IsaacConfig):
1032
  super().__init__()
1033
  vision_cfg = config.vision_config
 
1034
 
1035
  self.vision_tower = IsaacVisionTransformer(vision_cfg)
1036
+ self.multimodal_projector = IsaacMultiModalProjector(config)
1037
 
1038
  def forward(self, vision_tokens: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
1039
  hidden_states = self.vision_tower(vision_tokens)
 
1130
  return target_height, target_width
1131
 
1132
 
1133
  class IsaacConfig(PretrainedConfig):
1134
  """Configuration class for Isaac multimodal model.
1135
 
 
1150
  vision_token: str = "<image>",
1151
  **kwargs,
1152
  ):
 
1153
  attn_implementation = kwargs.get("attn_implementation")
1154
 
1155
  if isinstance(text_config, dict):
1156
  self.text_config = self.sub_configs["text_config"](**text_config)
1157
+ elif isinstance(text_config, Qwen3Config):
1158
+ self.text_config = text_config
1159
  elif text_config is None:
1160
  self.text_config = self.sub_configs["text_config"]()
1161
 
1162
+ # Seed RoPE parameters before base init so the shared mixin can standardize/validate them.
1163
+ self.rope_parameters = getattr(self.text_config, "rope_parameters", None)
1164
+ self.layer_types = getattr(self.text_config, "layer_types", None)
1165
 
1166
+ super().__init__(**kwargs)
1167
 
1168
+ # Keep rope parameters aligned between the composite and text sub-configs.
1169
+ self.text_config.rope_parameters = self.rope_parameters
1170
 
1171
+ # Mirror frequently accessed Qwen3 attributes at the composite config level
1172
  self.vocab_size = self.text_config.vocab_size
1173
  self.hidden_size = self.text_config.hidden_size
1174
  self.num_hidden_layers = self.text_config.num_hidden_layers
 
1176
  self.head_dim = self.text_config.head_dim
1177
  self.hidden_act = self.text_config.hidden_act
1178
  self.use_cache = self.text_config.use_cache
1179
+ self.rope_theta = self.rope_parameters["rope_theta"]
1180
 
1181
  self.layer_types = getattr(self.text_config, "layer_types", None)
1182
  layer_type_validation(self.layer_types, self.num_hidden_layers)
 
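For reference, the standardized RoPE parameters mirrored between the composite config and the text sub-config take the shape below (values illustrative):

rope_parameters = {"rope_theta": 1000000, "rope_type": "default"}
rope_theta = rope_parameters["rope_theta"]  # what self.rope_theta exposes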
1205
  self.max_sequence_length = max_sequence_length
1206
  self.vision_token = vision_token
1207
 
1208
  def to_dict(self):
1209
  output = super().to_dict()
1210
  # Ensure nested configs round-trip through dict serialization
 
1266
  class IsaacProcessor(ProcessorMixin):
1267
  attributes = ["image_processor", "tokenizer"]
1268
  image_processor_class = ("IsaacImageProcessorFast",)
1269
+ tokenizer_class = ("Qwen2Tokenizer",)
1270
 
1271
  def __init__(
1272
  self,
 
1446
  return position_ids
1447
 
1448
 
1449
+ class IsaacRotaryEmbedding(qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding):
1450
  EXTRA_ROPE_KEYS = {"mrope_section", "mrope_interleaved"}
1451
 
1452
  def __init__(self, config: IsaacConfig, device=None):
1453
  rope_source_cfg = config.get_text_config() if hasattr(config, "get_text_config") else config
1454
  rope_scaling = getattr(rope_source_cfg, "rope_scaling", None) or {}
1455
 
 
1458
  config_for_rope.rope_scaling = sanitized_scaling if sanitized_scaling else None
1459
 
1460
  init_device = device if device is not None and getattr(device, "type", None) != "meta" else None
1461
+ super().__init__(config_for_rope, device=init_device)
1462
 
1463
+ rotary_half_dim = self.inv_freq.shape[0]
1464
  self.mrope_section = self._resolve_mrope_section(rope_scaling.get("mrope_section"), rotary_half_dim)
1465
  self.hidden_size = getattr(rope_source_cfg, "hidden_size", None) or config.hidden_size
1466
 
 
1486
  chunks = tensor.split(split_sections, dim=-1)
1487
  return torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
1488
 
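A small self-contained illustration of the interleaved merge above: the per-axis cos/sin tensors are stacked on dim 0 in (t, h, w) order, split into head-dim chunks, and chunk i takes the axis selected round-robin by i % 3 (sizes made up):

import torch

axes = torch.stack([torch.full((1, 4, 6), v) for v in (0.0, 1.0, 2.0)])  # (3, B, L, D)
chunks = axes.split([2, 2, 2], dim=-1)
merged = torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
# Channels 0:2 come from the t axis, 2:4 from h, 4:6 from w.
assert merged[0, 0, :2].tolist() == [0.0, 0.0]
assert merged[0, 0, 2:4].tolist() == [1.0, 1.0]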
1489
  def forward(
1490
  self,
1491
  position_ids: torch.Tensor,
 
1517
 
1518
  pos_axes = pos.permute(2, 0, 1).contiguous()
1519
 
1520
+ cos_axes, sin_axes = super().forward(hidden_states, pos_axes)
1521
 
1522
  cos_axes = cos_axes.to(hidden_states.dtype)
1523
  sin_axes = sin_axes.to(hidden_states.dtype)
 
1532
  supports_gradient_checkpointing = True
1533
  _can_compile_fullgraph = False
1534
  _supports_flex_attn = False
1535
+ _can_record_outputs = {"attentions": OutputRecorder(IsaacVisionAttention, index=1)}
1536
  # Expose tied-weights mapping even if empty for base model tests.
1537
  all_tied_weights_keys: dict[str, str] = {}
1538
 
 
1592
  self.text_model.embed_tokens = value
1593
 
1594
  @property
1595
+ def vision_model(self) -> nn.Module:
1596
+ return self.vision_embedding.vision_tower
1597
 
1598
  @property
1599
  def vision_model(self) -> nn.Module:
 
1650
  h = embedded_ts.compact() # (B, T, D)
1651
  return h
1652
 
1653
+ @staticmethod
1654
+ def compute_position_ids_input_ids(input_ids: torch.Tensor) -> torch.Tensor:
1655
+ return compute_position_ids_input_ids(input_ids)
1656
+
1657
+ def _prepare_position_and_modality(
1658
+ self,
1659
+ position_ids: Optional[torch.LongTensor],
1660
+ modality_tensor: Optional[torch.LongTensor],
1661
+ tensor_stream: Optional[TensorStream],
1662
+ inputs_embeds: torch.Tensor,
1663
+ cache_position: torch.LongTensor,
1664
+ ) -> tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor, torch.Tensor, torch.Tensor]:
1665
+ text_value = TextType.text.value if TextType is not None else 0
1666
+ batch_size, seq_len = inputs_embeds.shape[:2]
1667
+
1668
+ if modality_tensor is None:
1669
+ if tensor_stream is not None:
1670
+ modality_tensor = modality_mask(tensor_stream)
1671
+ else:
1672
+ modality_tensor = torch.full(
1673
+ (batch_size, seq_len), text_value, device=inputs_embeds.device, dtype=torch.long
1674
+ )
1675
+ else:
1676
+ modality_tensor = modality_tensor.to(device=inputs_embeds.device, dtype=torch.long)
1677
+ expected_shape = (batch_size, seq_len)
1678
+ if modality_tensor.shape != torch.Size(expected_shape):
1679
+ raise ValueError(
1680
+ f"modality_tensor must have shape (batch_size, seq_len) {expected_shape}, "
1681
+ f"but got {tuple(modality_tensor.shape)}"
1682
+ )
1683
+
1684
+ if position_ids is None:
1685
+ if tensor_stream is not None:
1686
+ position_ids = compute_mrope_pos_tensor(tensor_stream) # (B,L,3)
1687
+ else:
1688
+ position_ids = cache_position.view(1, -1).expand(modality_tensor.shape[0], -1)
1689
+
1690
+ if position_ids.ndim == 2:
1691
+ position_ids = position_ids.to(device=inputs_embeds.device)
1692
+ position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)
1693
+
1694
+ if position_ids.shape[1] != seq_len:
1695
+ start_positions = position_ids[:, :1, 0]
1696
+ position_ids = torch.arange(seq_len, device=inputs_embeds.device).view(1, -1)
1697
+ position_ids = position_ids + start_positions
1698
+ position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)
1699
+
1700
+ cos, sin = self.rotary_emb(
1701
+ position_ids,
1702
+ modality_tensor,
1703
+ hidden_states=inputs_embeds,
1704
+ )
1705
+
1706
+ decoder_position_ids = position_ids[..., 0] if position_ids.ndim == 3 else position_ids
1707
+ return position_ids, modality_tensor, decoder_position_ids, cos, sin
1708
+
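The 2D -> 3D promotion above is just replication of text positions across the three MRoPE planes; a minimal sketch:

import torch

position_ids = torch.arange(5).view(1, -1)             # (B=1, L=5)
pos_3d = position_ids.unsqueeze(-1).expand(-1, -1, 3)  # (1, 5, 3): t == h == w
assert (pos_3d[..., 0] == pos_3d[..., 2]).all()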
1709
  @auto_docstring
1710
  @check_model_inputs
1711
  def forward(
 
1718
  past_key_values: Optional[list[torch.FloatTensor]] = None,
1719
  inputs_embeds: Optional[torch.FloatTensor] = None,
1720
  use_cache: Optional[bool] = None,
1721
  cache_position: Optional[torch.LongTensor] = None,
1722
+ **kwargs: Unpack[TransformersKwargs],
1723
  ) -> tuple | BaseModelOutputWithPast:
1724
  """
1725
  Forward pass with MRoPE position embeddings.
 
1737
  omitted.
1738
  """
1739
 
1740
+ output_attentions = kwargs.pop("output_attentions", None)
1741
 
1742
  # Get inputs
 
1743
  if tensor_stream is not None and inputs_embeds is not None:
1744
  raise ValueError("You cannot specify both tensor_stream and inputs_embeds")
1745
+ if tensor_stream is None and input_ids is not None and inputs_embeds is not None:
1746
  raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1747
+
1748
+ # Resolve the input source (TensorStream takes precedence over token ids).
1749
+ if tensor_stream is not None:
1750
+ inputs_embeds = self.embed_stream(tensor_stream)
1751
  elif input_ids is not None:
1752
  inputs_embeds = self.text_model.embed_tokens(input_ids)
1753
+ elif inputs_embeds is None:
1754
  raise ValueError("You have to specify either tensor_stream, input_ids or inputs_embeds")
1755
 
1756
+ batch_size, seq_len = inputs_embeds.shape[:2]
1757
+
1758
  # Ensure cache exists when requested
1759
  if use_cache and past_key_values is None:
1760
  cache_config = self.config.get_text_config() if hasattr(self.config, "get_text_config") else self.config
1761
  past_key_values = DynamicCache(config=cache_config)
1762
 
1763
+ if cache_position is None:
1764
  past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1765
+ cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_len, device=inputs_embeds.device)
1766
 
1767
  if attention_mask is None:
1768
+ attention_mask = torch.ones((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long)
1769
 
1770
+ position_ids, modality_tensor, decoder_position_ids, cos, sin = self._prepare_position_and_modality(
1771
+ position_ids=position_ids,
1772
+ modality_tensor=modality_tensor,
1773
+ tensor_stream=tensor_stream,
1774
+ inputs_embeds=inputs_embeds,
1775
+ cache_position=cache_position,
1776
  )
1777
 
1778
  # Prepare attention mask
 
1779
  if not isinstance(attention_mask, dict):
1780
+ attention_mask = create_masks_for_generate(
1781
+ config=self.config,
1782
+ input_embeds=inputs_embeds,
1783
+ attention_mask=attention_mask,
1784
+ cache_position=cache_position,
1785
+ past_key_values=past_key_values,
1786
+ position_ids=decoder_position_ids,
1787
+ )
1788
+
1789
+ is_attention_mask_dict = isinstance(attention_mask, dict)
1790
 
1791
  # Initialize hidden states
1792
  hidden_states = inputs_embeds
 
1794
 
1795
  for decoder_layer in self.text_model.layers:
1796
  layer_attention_mask = (
1797
+ attention_mask[decoder_layer.attention_type] if is_attention_mask_dict else attention_mask
1798
  )
1799
  layer_outputs = decoder_layer(
1800
  hidden_states,
 
1808
  **kwargs,
1809
  )
1810
 
1811
+ layer_outputs_is_tuple = isinstance(layer_outputs, tuple)
1812
+ hidden_states = layer_outputs[0] if layer_outputs_is_tuple else layer_outputs
1813
+ if output_attentions and layer_outputs_is_tuple:
1814
+ all_attentions.append(layer_outputs[1])
1815
 
1816
  # Final layer norm
1817
  hidden_states = self.text_model.norm(hidden_states)
 
1832
  _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"}
1833
  all_tied_weights_keys: dict[str, str] = {"lm_head.weight": "model.text_model.embed_tokens.weight"}
1834
 
1835
  def __init__(self, config: IsaacConfig):
1836
  super().__init__(config)
1837
  self.model = IsaacModel(config) # Use our custom model
 
1840
  # Tracks rotary position offsets computed during a full forward pass so decode steps can reuse them.
1841
  self.rope_deltas = None
1842
 
1843
  def forward(
1844
  self,
1845
  input_ids: Optional[torch.LongTensor] = None,
 
1850
  inputs_embeds: Optional[torch.FloatTensor] = None,
1851
  labels: Optional[torch.LongTensor] = None,
1852
  use_cache: Optional[bool] = None,
1853
  cache_position: Optional[torch.LongTensor] = None,
1854
+ **kwargs: Unpack[TransformersKwargs],
1855
  ) -> tuple | CausalLMOutputWithPast:
1856
  r"""
1857
  Forward pass for conditional generation supporting both standard inputs and TensorStream.
 
1862
  `input_ids`.
1863
  """
1864
 
1865
+ output_attentions = kwargs.pop("output_attentions", None)
1866
+
1867
+ # Don't compute embeddings here; let the inner model handle it.
1868
  if tensor_stream is not None:
1869
  input_ids = None
1870
  if input_ids is None and inputs_embeds is None and tensor_stream is None:
1871
  raise ValueError("Either input_ids, inputs_embeds, or tensor_stream must be provided.")
1872
 
1873
+ # Record rope deltas on prefill when TensorStream is provided; leave position_ids building to IsaacModel.
1874
  if position_ids is None and tensor_stream is not None:
1875
  position_ids, self.rope_deltas = self.get_rope_index(input_ids, tensor_stream, attention_mask)
1876
+ elif position_ids is None and cache_position is not None and self.rope_deltas is not None:
1877
+ # Decode continuation after TensorStream prefill: advance positions using cached rope offsets.
1878
  if input_ids is not None:
1879
+ base_position_ids = compute_position_ids_input_ids(input_ids)
1880
+ else:
1881
+ if inputs_embeds is None:
1882
+ raise ValueError("inputs_embeds must be provided when input_ids is None during decode")
1883
  batch_size, seq_len = inputs_embeds.shape[:2]
1884
+ dummy_ids = torch.zeros((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long)
1885
+ base_position_ids = compute_position_ids_input_ids(dummy_ids)
1886
 
1887
+ rope_delta = (cache_position[0] + self.rope_deltas).to(base_position_ids.device)
1888
+ if not isinstance(rope_delta, int):
1889
+ rope_delta = rope_delta.repeat_interleave(base_position_ids.shape[0] // rope_delta.shape[0], dim=0)
1890
+ position_ids = base_position_ids.add(rope_delta)
1891
 
1892
  outputs = self.model(
1893
  input_ids=input_ids,
1894
  tensor_stream=tensor_stream,
1895
  attention_mask=attention_mask,
1896
  position_ids=position_ids,
1897
+ modality_tensor=None,
1898
  past_key_values=past_key_values,
1899
  inputs_embeds=inputs_embeds,
1900
  use_cache=use_cache,
1901
  output_attentions=output_attentions,
 
  cache_position=cache_position,
1903
  **kwargs,
1904
  )
 
1918
  attentions=outputs.attentions if output_attentions else None,
1919
  )
1920
 
1921
+ def set_input_embeddings(self, value: nn.Module) -> None:
1922
+ self.model.set_input_embeddings(value)
1923
+ vocab_size = getattr(value, "num_embeddings", None)
1924
+ if vocab_size is not None:
1925
+ self.config.vocab_size = vocab_size
1926
+ self.model.config.vocab_size = vocab_size
1927
+ if hasattr(self.model, "text_model"):
1928
+ self.model.text_model.config.vocab_size = vocab_size
1929
+ if self.lm_head.weight.shape[0] != vocab_size:
1930
+ self.lm_head = nn.Linear(self.config.hidden_size, vocab_size, bias=False)
1931
+ if hasattr(self.model, "embed_tokens"):
1932
+ self.lm_head.weight = self.model.text_model.embed_tokens.weight
1933
+
1934
+ def get_rope_index(
1935
+ self,
1936
+ input_ids: Optional[torch.Tensor],
1937
+ tensor_stream: Optional[TensorStream],
1938
+ attention_mask: Optional[torch.Tensor],
1939
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1940
+ """Compute MRoPE position ids from a TensorStream (or 1D fallback).
1941
+
1942
+ Returns (position_ids, rope_deltas). position_ids is (B,L,3) for MRoPE.
1943
+ rope_deltas is (B,1) used to advance positions in decode.
1944
+ """
1945
+ # tensor_stream present: compute 3D coords
1946
+ if tensor_stream is None and input_ids is None:
1947
+ raise ValueError("`tensor_stream` or `input_ids` must be provided to compute rope indices")
1948
+
1949
+ if tensor_stream is not None:
1950
+ pos_3d = compute_mrope_pos_tensor(tensor_stream) # (B,L,3)
1951
+ else:
1952
+ pos_3d = compute_position_ids_input_ids(input_ids)
1953
+ B, L, _ = pos_3d.shape
1954
+
1955
+ # Max position per batch across the 3 planes and sequence dimension: (B,)
1956
+ m_per_batch = pos_3d.amax(dim=(1, 2))
1957
+
1958
+ # Sequence lengths per batch: (B,)
1959
+ if attention_mask is None:
1960
+ seq_lens = torch.full_like(m_per_batch, L)
1961
+ else:
1962
+ seq_lens = attention_mask.eq(1).sum(dim=-1).to(dtype=m_per_batch.dtype, device=m_per_batch.device)
1963
+
1964
+ rope_deltas = (m_per_batch + 1 - seq_lens).to(dtype=pos_3d.dtype).unsqueeze(1)
1965
+ return pos_3d, rope_deltas
1966
+
1967
  def prepare_inputs_for_generation(
1968
  self,
1969
  input_ids: torch.LongTensor,
 
2019
  else:
2020
  model_inputs["tensor_stream"] = None
2021
 
2022
+ # TensorStream decode path: preserve rotary offsets from prefill; let forward rebuild positions
2023
  if tensor_stream is not None and not first_step and self.rope_deltas is not None:
2024
  model_inputs["position_ids"] = None
2025
  return model_inputs
2026
 
2027
  return model_inputs
2028
 
2029
  @classmethod
 
2031
  return True
2032
 
2033
 
2034
  def _compute_residual_p_frames(frames: torch.Tensor, is_p_frame: list[bool]) -> torch.Tensor:
2035
  """Compute residuals for P-frames to stay in sync with the training pipeline."""
2036
  if not any(is_p_frame):
preprocessor_config.json DELETED
@@ -1,10 +0,0 @@
1
- {
2
- "processor_class": "IsaacProcessor",
3
- "tokenizer_class": [
4
- "Qwen2Tokenizer",
5
- "Qwen2TokenizerFast"
6
- ],
7
- "auto_map": {
8
- "AutoProcessor": "modular_isaac.IsaacProcessor"
9
- }
10
- }
processor_config.json CHANGED
@@ -7,9 +7,7 @@
7
  "auto_map": {
8
  "AutoProcessor": "modular_isaac.IsaacProcessor"
9
  },
10
- "crop_size": null,
11
  "data_format": "channels_first",
12
- "device": null,
13
  "disable_grouping": false,
14
  "do_center_crop": false,
15
  "do_convert_rgb": true,
@@ -23,23 +21,17 @@
23
  0.5
24
  ],
25
  "image_processor_type": "IsaacImageProcessorFast",
26
- "image_seq_length": null,
27
  "image_std": [
28
  0.5,
29
  0.5,
30
  0.5
31
  ],
32
- "input_data_format": null,
33
  "max_num_patches": 6144,
34
  "min_num_patches": 256,
35
- "pad_size": null,
36
  "patch_size": 16,
37
  "pixel_shuffle_scale": 2,
38
- "processor_class": "IsaacProcessor",
39
  "resample": 2,
40
- "rescale_factor": 0.00392156862745098,
41
- "return_tensors": null,
42
- "size": null
43
  },
44
  "max_sequence_length": 16384,
45
  "processor_class": "IsaacProcessor",
 
7
  "auto_map": {
8
  "AutoProcessor": "modular_isaac.IsaacProcessor"
9
  },
 
10
  "data_format": "channels_first",
 
11
  "disable_grouping": false,
12
  "do_center_crop": false,
13
  "do_convert_rgb": true,
 
21
  0.5
22
  ],
23
  "image_processor_type": "IsaacImageProcessorFast",
 
24
  "image_std": [
25
  0.5,
26
  0.5,
27
  0.5
28
  ],
 
29
  "max_num_patches": 6144,
30
  "min_num_patches": 256,
 
31
  "patch_size": 16,
32
  "pixel_shuffle_scale": 2,
 
33
  "resample": 2,
34
+ "rescale_factor": 0.00392156862745098
35
  },
36
  "max_sequence_length": 16384,
37
  "processor_class": "IsaacProcessor",
special_tokens_map.json DELETED
@@ -1,31 +0,0 @@
1
- {
2
- "additional_special_tokens": [
3
- "<|im_start|>",
4
- "<|im_end|>",
5
- "<|object_ref_start|>",
6
- "<|object_ref_end|>",
7
- "<|box_start|>",
8
- "<|box_end|>",
9
- "<|quad_start|>",
10
- "<|quad_end|>",
11
- "<|vision_start|>",
12
- "<|vision_end|>",
13
- "<|vision_pad|>",
14
- "<|image_pad|>",
15
- "<|video_pad|>"
16
- ],
17
- "eos_token": {
18
- "content": "<|im_end|>",
19
- "lstrip": false,
20
- "normalized": false,
21
- "rstrip": false,
22
- "single_word": false
23
- },
24
- "pad_token": {
25
- "content": "<|endoftext|>",
26
- "lstrip": false,
27
- "normalized": false,
28
- "rstrip": false,
29
- "single_word": false
30
- }
31
- }
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0c670a45d54b226b4213f50c920332be152acff8fafaabdafd5586e772c3d500
3
- size 11473541
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6a069d8afc5e4604a1d15db8b4678d9a804bda3991fe2822cf350ec571084f2
3
+ size 11473537
tokenizer_config.json CHANGED
@@ -1,7 +1,5 @@
1
  {
2
- "add_bos_token": false,
3
  "add_prefix_space": false,
4
- "additional_special_tokens": null,
5
  "auto_map": {
6
  "AutoProcessor": "modular_isaac.IsaacProcessor"
7
  },
 
1
  {
 
2
  "add_prefix_space": false,
 
3
  "auto_map": {
4
  "AutoProcessor": "modular_isaac.IsaacProcessor"
5
  },
vocab.json DELETED
The diff for this file is too large to render. See raw diff