Commit 6201703
Parent(s): 7e148c3

remove sequential

Changed files:
- added_tokens.json (+0 -295)
- config.json (+5 -10)
- merges.txt (+0 -0)
- model-00001-of-00003.safetensors (+0 -3)
- model-00002-of-00003.safetensors (+0 -3)
- model-00003-of-00003.safetensors → model.safetensors (+2 -2)
- model.safetensors.index.json (+0 -758)
- modular_isaac.py (+411 -604)
- processor_config.json (+1 -9)
- special_tokens_map.json (+0 -31)
- tokenizer.json (+2 -2)
- tokenizer_config.json (+0 -2)
- vocab.json (+0 -0)
added_tokens.json DELETED
@@ -1,295 +0,0 @@
-{
-  "</think>": 151668,
-  "</tool_call>": 151658,
-  "</tool_response>": 151666,
   [... 267 reserved-token entries follow in lexicographic key order, each mapping
   "<reserved_N>" to 151669 + N for N = 0..266 (ids 151669 through 151935) ...]
-  "<think>": 151667,
-  "<tool_call>": 151657,
-  "<tool_response>": 151665,
-  "<|box_end|>": 151649,
-  "<|box_start|>": 151648,
-  "<|endoftext|>": 151643,
-  "<|file_sep|>": 151664,
-  "<|fim_middle|>": 151660,
-  "<|fim_pad|>": 151662,
-  "<|fim_prefix|>": 151659,
-  "<|fim_suffix|>": 151661,
-  "<|im_end|>": 151645,
-  "<|im_start|>": 151644,
-  "<|image_pad|>": 151655,
-  "<|object_ref_end|>": 151647,
-  "<|object_ref_start|>": 151646,
-  "<|quad_end|>": 151651,
-  "<|quad_start|>": 151650,
-  "<|repo_name|>": 151663,
-  "<|video_pad|>": 151656,
-  "<|vision_end|>": 151653,
-  "<|vision_pad|>": 151654,
-  "<|vision_start|>": 151652
-}
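The deleted file is almost entirely regular: apart from the chat/tool and `<|...|>` special tokens listed above, the 267 reserved entries follow a fixed offset. A minimal sketch (not part of the commit) reconstructing that block in Python:

```python
# Sketch only: rebuild the reserved-token block of the deleted added_tokens.json.
# "<reserved_N>" maps to token id 151669 + N for N = 0..266, as the diff above shows.
reserved_tokens = {f"<reserved_{n}>": 151669 + n for n in range(267)}

assert reserved_tokens["<reserved_0>"] == 151669
assert reserved_tokens["<reserved_266>"] == 151935
```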
config.json CHANGED
@@ -1,12 +1,4 @@
 {
-  "_rope_parameters": {
-    "rope_theta": 1000000,
-    "rope_type": "default"
-  },
-  "_rope_scaling": {
-    "rope_theta": 1000000,
-    "rope_type": "default"
-  },
   "architectures": [
     "IsaacForConditionalGeneration"
   ],
@@ -63,7 +55,11 @@
   "num_key_value_heads": 8,
   "pixel_shuffle_scale": 2,
   "rms_norm_eps": 1e-06,
-  "
   [the rest of this deleted line is cut off in the rendered diff]
+  "rope_parameters": {
+    "rope_theta": 1000000,
+    "rope_type": "default"
+  },
+  "rope_theta": 1000000,
   "sliding_window": null,
   "text_config": {
     "architectures": [
@@ -121,7 +117,6 @@
     "rope_theta": 1000000,
     "rope_type": "default"
   },
-  "rope_theta": 1000000.0,
   "sliding_window": null,
   "use_cache": true,
   "use_sliding_window": false,
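The net effect of the config.json change is that the rope settings now live under an un-prefixed top-level `rope_parameters` key, the underscore-prefixed `_rope_parameters`/`_rope_scaling` blocks are removed, and what appears to be a duplicated floating-point `rope_theta` deeper in the config is dropped. A small sketch, assuming the updated config.json has been downloaded to the current directory, that verifies the reshaped keys:

```python
# Sketch only: check the reshaped rope configuration in the updated config.json.
# Assumes the file is present locally as "config.json".
import json

with open("config.json") as f:
    cfg = json.load(f)

assert "_rope_parameters" not in cfg and "_rope_scaling" not in cfg
assert cfg["rope_parameters"] == {"rope_theta": 1000000, "rope_type": "default"}
assert cfg["rope_theta"] == 1000000
```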
merges.txt DELETED
The diff for this file is too large to render. See raw diff.
model-00001-of-00003.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6bb45f8ee129e542e12ea62717345ce39118f3f26971a082410b70f898aad3f3
-size 4969541832
model-00002-of-00003.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bd03019a8b436cad6445923d117afea6b5925be937cafedb86c65bc76f37624b
-size 4054193816
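Both deleted shard entries above are Git LFS pointer files rather than the tensors themselves: each records only the spec version, a SHA-256 of the content, and its size in bytes. A hedged sketch (the file name is illustrative) of how such a pointer is derived from a real file:

```python
# Sketch only: compute the oid/size fields of a Git LFS pointer for a local file.
import hashlib
import os

def lfs_pointer(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks so multi-gigabyte shards need not fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (
        "version https://git-lfs.github.com/spec/v1\n"
        f"oid sha256:{digest.hexdigest()}\n"
        f"size {os.path.getsize(path)}\n"
    )

print(lfs_pointer("model-00002-of-00003.safetensors"))
```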
model-00003-of-00003.safetensors → model.safetensors RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:50d7a76b77897015695972d1c51841854c659017480c976f98f860c2e70f8aa6
+size 10268395696
   [the old oid and size values are not shown in the rendered diff]
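The rename records the three shards collapsing into a single model.safetensors (about 10.27 GB), which is also why the sharded index file below is deleted. A minimal sketch of one way to produce such a consolidated file with the safetensors library, assuming all shards fit in host memory; this is an illustration, not the script used for this commit:

```python
# Sketch only: merge sharded checkpoints into one safetensors file.
from safetensors.torch import load_file, save_file

shards = [
    "model-00001-of-00003.safetensors",
    "model-00002-of-00003.safetensors",
    "model-00003-of-00003.safetensors",
]

tensors = {}
for shard in shards:
    # Tensor names are unique across shards, so a plain dict update suffices.
    tensors.update(load_file(shard))

# One file now holds every weight; model.safetensors.index.json is no longer needed.
save_file(tensors, "model.safetensors", metadata={"format": "pt"})
```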
model.safetensors.index.json DELETED
@@ -1,758 +0,0 @@
-{
-  "metadata": {
-    "total_parameters": 2567073008,
-    "total_size": 10268292032
-  },
-  "weight_map": {
-    "lm_head.weight": "model-00003-of-00003.safetensors",
-    "model.text_model.embed_tokens.weight": "model-00001-of-00003.safetensors",
   [... per-layer entries for model.text_model.layers.0 through .27 follow in
   lexicographic key order; each layer lists input_layernorm, mlp.down_proj,
   mlp.gate_proj, mlp.up_proj, post_attention_layernorm and
   self_attn.{k_norm,k_proj,o_proj,q_norm,q_proj,v_proj} weights. Layers 0-17 map
   to model-00001-of-00003.safetensors; layer 18 is split (input_layernorm,
   mlp.down_proj, mlp.up_proj and post_attention_layernorm in
   model-00002-of-00003.safetensors, the remaining weights in
   model-00001-of-00003.safetensors); layers 19-27 map to
   model-00002-of-00003.safetensors ...]
-    "model.text_model.norm.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.multimodal_projector.0.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.multimodal_projector.2.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.embeddings.patch_embedding.bias": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.embeddings.patch_embedding.weight": "model-00002-of-00003.safetensors",
-    "model.vision_embedding.vision_tower.embeddings.position_embedding.weight": "model-00002-of-00003.safetensors",
   [... vision-tower encoder entries follow (layer_norm1, layer_norm2, mlp.fc1,
   mlp.fc2 and self_attn.{k_proj,out_proj,q_proj,v_proj}, bias and weight for each
   layer), all mapped to model-00002-of-00003.safetensors; the rendered diff is cut
   off partway through encoder layer 14 ...]
|
| 426 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 427 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 428 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 429 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 430 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 431 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 432 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 433 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 434 |
-
"model.vision_embedding.vision_tower.encoder.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 435 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 436 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 437 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 438 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 439 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 440 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 441 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 442 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 443 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 444 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 445 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 446 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 447 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 448 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 449 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 450 |
-
"model.vision_embedding.vision_tower.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 451 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 452 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 453 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 454 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 455 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 456 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 457 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 458 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 459 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 460 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 461 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 462 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 463 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 464 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 465 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 466 |
-
"model.vision_embedding.vision_tower.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 467 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 468 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 469 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 470 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 471 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 472 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 473 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 474 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 475 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 476 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 477 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 478 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 479 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 480 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 481 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 482 |
-
"model.vision_embedding.vision_tower.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 483 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 484 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 485 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 486 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 487 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 488 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 489 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 490 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 491 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 492 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 493 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 494 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 495 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 496 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 497 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 498 |
-
"model.vision_embedding.vision_tower.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 499 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 500 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 501 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 502 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 503 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 504 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 505 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 506 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 507 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 508 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 509 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 510 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 511 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 512 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 513 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 514 |
-
"model.vision_embedding.vision_tower.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 515 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 516 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 517 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 518 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 519 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 520 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 521 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 522 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 523 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 524 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 525 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 526 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 527 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 528 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 529 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 530 |
-
"model.vision_embedding.vision_tower.encoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 531 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 532 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 533 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 534 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 535 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 536 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 537 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 538 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 539 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 540 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 541 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 542 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 543 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 544 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 545 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 546 |
-
"model.vision_embedding.vision_tower.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 547 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 548 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 549 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 550 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 551 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 552 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 553 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 554 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 555 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 556 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 557 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 558 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 559 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 560 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 561 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 562 |
-
"model.vision_embedding.vision_tower.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 563 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 564 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 565 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 566 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 567 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 568 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 569 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 570 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 571 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 572 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 573 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 574 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 575 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 576 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 577 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 578 |
-
"model.vision_embedding.vision_tower.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 579 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 580 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 581 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 582 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 583 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 584 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 585 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 586 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 587 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 588 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 589 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 590 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 591 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 592 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 593 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 594 |
-
"model.vision_embedding.vision_tower.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 595 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 596 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 597 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 598 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 599 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 600 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 601 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 602 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 603 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 604 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 605 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 606 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 607 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 608 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 609 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 610 |
-
"model.vision_embedding.vision_tower.encoder.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 611 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 612 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 613 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 614 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 615 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 616 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 617 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 618 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 619 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 620 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 621 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 622 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 623 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 624 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 625 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 626 |
-
"model.vision_embedding.vision_tower.encoder.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 627 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 628 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 629 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 630 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 631 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 632 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 633 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 634 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 635 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 636 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 637 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 638 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 639 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 640 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 641 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 642 |
-
"model.vision_embedding.vision_tower.encoder.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 643 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 644 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 645 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 646 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 647 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 648 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 649 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 650 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 651 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 652 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 653 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 654 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 655 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 656 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 657 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 658 |
-
"model.vision_embedding.vision_tower.encoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 659 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 660 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 661 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 662 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 663 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 664 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 665 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 666 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 667 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 668 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 669 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 670 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 671 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 672 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 673 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 674 |
-
"model.vision_embedding.vision_tower.encoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 675 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 676 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 677 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 678 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 679 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 680 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 681 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 682 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 683 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 684 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 685 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 686 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 687 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 688 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 689 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 690 |
-
"model.vision_embedding.vision_tower.encoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 691 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 692 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 693 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 694 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 695 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 696 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 697 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 698 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 699 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 700 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 701 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 702 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 703 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 704 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 705 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 706 |
-
"model.vision_embedding.vision_tower.encoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 707 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 708 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 709 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 710 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 711 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 712 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 713 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 714 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 715 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 716 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 717 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 718 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 719 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 720 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 721 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 722 |
-
"model.vision_embedding.vision_tower.encoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 723 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 724 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 725 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 726 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 727 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 728 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 729 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 730 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 731 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 732 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 733 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 734 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 735 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 736 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 737 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 738 |
-
"model.vision_embedding.vision_tower.encoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 739 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.layer_norm1.bias": "model-00002-of-00003.safetensors",
|
| 740 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.layer_norm1.weight": "model-00002-of-00003.safetensors",
|
| 741 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.layer_norm2.bias": "model-00002-of-00003.safetensors",
|
| 742 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.layer_norm2.weight": "model-00002-of-00003.safetensors",
|
| 743 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc1.bias": "model-00002-of-00003.safetensors",
|
| 744 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc1.weight": "model-00002-of-00003.safetensors",
|
| 745 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc2.bias": "model-00002-of-00003.safetensors",
|
| 746 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.mlp.fc2.weight": "model-00002-of-00003.safetensors",
|
| 747 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
| 748 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
| 749 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00003.safetensors",
|
| 750 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00003.safetensors",
|
| 751 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
| 752 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
| 753 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
| 754 |
-
"model.vision_embedding.vision_tower.encoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
| 755 |
-
"model.vision_embedding.vision_tower.post_layernorm.bias": "model-00002-of-00003.safetensors",
|
| 756 |
-
"model.vision_embedding.vision_tower.post_layernorm.weight": "model-00002-of-00003.safetensors"
|
| 757 |
-
}
|
| 758 |
-
}
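The file removed above is the standard sharded-checkpoint manifest: its "weight_map" maps every parameter name to the shard file that stores it, and the index is only needed while the weights are split across several shards; a single-file checkpoint carries no index. A minimal sketch of how such an index is consumed when loading (hypothetical helper, standard json/safetensors APIs, not code from this repo):

# Sketch: stitch a sharded safetensors checkpoint back together from its index.
# Assumes the usual {"metadata": ..., "weight_map": {param_name: shard_file}} layout.
import json
from pathlib import Path

import torch
from safetensors.torch import load_file


def load_sharded_state_dict(checkpoint_dir: str) -> dict[str, torch.Tensor]:
    index_path = Path(checkpoint_dir) / "model.safetensors.index.json"
    if not index_path.exists():
        # Single-file checkpoints have no index at all.
        return load_file(Path(checkpoint_dir) / "model.safetensors")

    weight_map: dict[str, str] = json.loads(index_path.read_text())["weight_map"]
    state_dict: dict[str, torch.Tensor] = {}
    for shard_name in sorted(set(weight_map.values())):
        # Each shard holds a disjoint subset of the parameters named in weight_map.
        state_dict.update(load_file(Path(checkpoint_dir) / shard_name))
    return state_dict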
modular_isaac.py
CHANGED
|
@@ -88,7 +88,7 @@ import re
from collections import defaultdict
from typing import Any, Callable, Optional, Sequence, Union

-
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -133,7 +133,7 @@ from transformers.models.siglip2.modeling_siglip2 import (
from transformers.masking_utils import create_masks_for_generate, eager_mask, packed_sequence_mask_function, sdpa_mask
from transformers.processing_utils import ImagesKwargs, ProcessorMixin, Unpack
from transformers.utils import auto_docstring, TensorType
-from transformers.utils.generic import can_return_tuple, check_model_inputs

# Vision preprocessing constants
from transformers.utils.constants import IMAGENET_STANDARD_MEAN as VISION_MEAN
@@ -141,7 +141,16 @@ from transformers.utils.constants import IMAGENET_STANDARD_STD as VISION_STD
from transformers.utils.import_utils import is_torchdynamo_compiling

try:
-    from
        Event,
        Stream,
        TensorStream,
@@ -150,25 +159,16 @@ try:
        create_stream,
        group_streams,
    )
-    from genesis.public.tensorstream.tensor_stream_utils import (
-        compute_mrope_pos_tensor,
-        modality_mask,
-        reconstruct_tensor_stream_from_compact_dict,
-        tensor_stream_token_view,
-    )
-    from genesis.public.tensorstream.tensor_stream_utils import (
-        slice as ts_slice,
-    )
except ModuleNotFoundError as exc:  # pragma: no cover - import guard
    raise ModuleNotFoundError(
        "genesis.public.tensorstream is required for the Isaac HuggingFace integration. "
        "Ensure the TensorStream package is installed and on PYTHONPATH."
    ) from exc

-_ORIGINAL_ATTENTION_FUNCTIONS: dict[str, Callable[..., tuple[torch.Tensor, Optional[torch.Tensor]]]] = {}
-for _attn_name in ("flash_attention_2", "sdpa", "eager"):
-
-


class IsaacVisionConfig(Siglip2VisionConfig):
@@ -217,8 +217,12 @@ class IsaacVisionConfig(Siglip2VisionConfig):
        # Add our custom fields
        self.pixel_shuffle_scale_factor = pixel_shuffle_scale_factor


-
    patch_size: Optional[int]
    max_num_patches: Optional[int]
    min_num_patches: Optional[int]
@@ -232,36 +236,27 @@ class IsaacImageProcessorFast(BaseImageProcessorFast):

    resample = PILImageResampling.BILINEAR
    model_input_names = ["patches", "token_grids"]
-    valid_kwargs =
    unused_kwargs = ["size", "do_center_crop", "crop_size"]

    do_resize = True
-    size: Optional[SizeDict] = None
-    default_to_square: Optional[bool] = None
    do_center_crop = False
-    crop_size: Optional[SizeDict] = None
    patch_size: Optional[int] = 16
    max_num_patches: Optional[int] = 256
    min_num_patches: Optional[int] = None
    pixel_shuffle_scale: Optional[int] = 1
    do_pad = False
-    pad_size: Optional[SizeDict] = None
    do_rescale = True
-    rescale_factor = 1 / 255
    do_normalize = True
    image_mean = list(VISION_MEAN)
    image_std = list(VISION_STD)
    do_convert_rgb = True
-    return_tensors = None
-    data_format = ChannelDimension.FIRST
-    input_data_format = None
-    device = None
    disable_grouping = False
    size_divisor: Optional[int] = None

    def __init__(
        self,
-        **kwargs: Unpack[
    ) -> None:
        super().__init__(**kwargs)
@@ -397,7 +392,7 @@ class IsaacImageProcessorFast(BaseImageProcessorFast):
        nhwc_images = image_batch.permute(0, 2, 3, 1)
        nhwc_images = _compute_residual_p_frames(nhwc_images, is_p_frame=[False] * batch_size)

-        patches =
        _, height_tokens, width_tokens, _ = patches.shape

        token_grid = (
@@ -486,38 +481,44 @@ def document_mask_function_from_cu_seqlens(cu_seqlens: Optional[torch.Tensor]) -
    return packed_sequence_mask_function(packed_sequence_mask)


-def
-
    cu_seqlens: Optional[torch.Tensor],
-
-
-    device: torch.device,
-    *,
-    return_mask_function: bool = False,
-) -> Optional[Union[torch.Tensor, Callable]]:
-    """Return the provided mask, a callable mask from ``cu_seqlens``, or ``None``.
-
-    ``return_mask_function=True`` yields a callable suitable for ``masking_utils``; otherwise
-    ``None`` is returned when no explicit ``attention_mask`` is provided. The legacy additive mask
-    has been removed in favor of the callable-based path.
-    """

-
-

-
        return None

-
-
-
-


class IsaacVisionEmbeddings(nn.Module):
    """Adapter around SigLIP2 vision embeddings that consumes packed patch sequences."""

-    # Copied from transformers.models.siglip2.modeling_siglip2.Siglip2VisionEmbeddings.__init__
    def __init__(self, config: IsaacVisionConfig):
        super().__init__()
        self.config = config
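The helpers in this hunk revolve around cu_seqlens, the cumulative lengths of the documents packed into one long sequence. A minimal sketch (an assumption for illustration, not this repo's implementation) of how a block-diagonal document mask can be derived from cu_seqlens:

# Sketch: positions may only attend within their own packed document.
import torch


def block_diagonal_document_mask(cu_seqlens: torch.Tensor, total_len: int) -> torch.Tensor:
    # cu_seqlens holds cumulative sequence lengths, e.g. [0, 5, 9, 16].
    positions = torch.arange(total_len, device=cu_seqlens.device)
    # Assign each position the index of the document it falls into.
    doc_ids = torch.bucketize(positions, cu_seqlens[1:-1], right=True)
    # True where the query and key positions belong to the same document.
    return doc_ids[:, None] == doc_ids[None, :]


# Example: three packed documents of lengths 5, 4 and 7.
mask = block_diagonal_document_mask(torch.tensor([0, 5, 9, 16]), total_len=16)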
@@ -538,7 +539,6 @@ class IsaacVisionEmbeddings(nn.Module):
        if packed_pixel_values is None:
            return seq_patches.new_zeros((0, self.embed_dim))

-        # Copied from transformers.models.siglip2.modeling_siglip2.Siglip2VisionEmbeddings.forward
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(packed_pixel_values.to(dtype=target_dtype))
@@ -554,7 +554,6 @@ class IsaacVisionEmbeddings(nn.Module):
        embeddings = patch_embeds + resized_positional_embeddings
        return self._unpack_from_batch(embeddings, seq_lengths)

-    # Copied from transformers.models.siglip2.modeling_siglip2.Siglip2VisionEmbeddings.resize_positional_embeddings
    @staticmethod
    def resize_positional_embeddings(
        positional_embeddings: torch.Tensor,
@@ -668,288 +667,89 @@ class IsaacVisionEmbeddings(nn.Module):
class IsaacVisionAttention(Siglip2Attention):
    """Custom attention that supports variable-length sequences with flash attention."""

-    ATTENTION_KEY_MAP: dict[str, str] = {
-        "flash_attention_2": "isaac_flash_attention_2",
-        "flash_attention_3": "isaac_flash_attention_3",
-        "isaac_flash_attention_2": "isaac_flash_attention_2",
-        "isaac_flash_attention_3": "isaac_flash_attention_3",
-        "sdpa": "isaac_sdpa",
-        "isaac_sdpa": "isaac_sdpa",
-        "eager": "isaac_eager",
-        "isaac_eager": "isaac_eager",
-    }
-    _FLASH_IMPLS = frozenset(("isaac_flash_attention_2", "isaac_flash_attention_3"))
-
-    def __init__(self, config):
-        super().__init__(config)
-        self.config = config
-        self._variable_length_metadata = None
-
-    def _variable_length_context(self, *, cu_seqlens=None, max_seqlen=None):
-        """Store packed-sequence metadata for the next forward call."""
-        self._variable_length_metadata = (cu_seqlens, max_seqlen)
-
-    def _consume_variable_length_metadata(self):
-        if self._variable_length_metadata is None:
-            return None, None
-        cu_seqlens, max_seqlen = self._variable_length_metadata
-        self._variable_length_metadata = None
-        return cu_seqlens, max_seqlen
-
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
-        position_ids: Optional[torch.Tensor] = None,
-        past_key_value: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
-        is_causal: bool = False,
        cu_seqlens: Optional[torch.Tensor] = None,
        max_seqlen: Optional[int] = None,
        **kwargs,
    ):
-        # Unused arguments are accepted for interface compatibility
-        _ = position_ids
-        _ = past_key_value
-        _ = is_causal
-        _ = output_attentions
-
        kwargs.pop("output_hidden_states", None)
        kwargs.pop("return_dict", None)
-        if kwargs:
-            unexpected = ", ".join(sorted(kwargs))
-            raise TypeError(f"Unexpected kwargs for IsaacVisionAttention.forward: {unexpected}")
-
-        cached_cu, cached_max = self._consume_variable_length_metadata()
-        if cu_seqlens is None:
-            cu_seqlens = cached_cu
-        if max_seqlen is None:
-            max_seqlen = cached_max
-
-        # Expect packed sequences with batch_size == 1
-        batch_size, L, _ = hidden_states.shape
-        if batch_size != 1:
-            raise ValueError("packed variable-length attention expects batch_size=1")
-        x = hidden_states[0]  # (L, E)
-
-        H = self.num_heads
-        D = self.head_dim
-        p_drop = self.dropout if self.training else 0.0
-
-        # Project and reshape to (L, H, D)
-        q = self.q_proj(x).view(L, H, D)
-        k = self.k_proj(x).view(L, H, D)
-        v = self.v_proj(x).view(L, H, D)
-
-        resolved_key = "isaac_sdpa"
-        if self.config._attn_implementation != "sdpa":
-            resolved_key = self.ATTENTION_KEY_MAP.get(self.config._attn_implementation, resolved_key)
-
-        attn_mask = ensure_document_attention_mask(
-            attention_mask,
-            cu_seqlens,
-            L,
-            q.dtype,
-            q.device,
-            return_mask_function=True,
-        )
-
-        attn_weights = None
-        if resolved_key in self._FLASH_IMPLS:
-            y_lhd = self._flash_attention_forward(
-                q_lhd=q,
-                k_lhd=k,
-                v_lhd=v,
-                cu_seqlens=cu_seqlens,
-                max_seqlen=max_seqlen,
-                dropout=p_drop,
-            )
-        elif resolved_key == "isaac_sdpa":
-            y_lhd = self._sdpa_attention_forward(
-                q_lhd=q,
-                k_lhd=k,
-                v_lhd=v,
-                attention_mask=attn_mask,
-                cu_seqlens=cu_seqlens,
-                dropout=p_drop,
-            )
-        elif resolved_key == "isaac_eager":
-
y_lhd, attn_weights = self._eager_attention_forward(
|
| 778 |
-
q_lhd=q,
|
| 779 |
-
k_lhd=k,
|
| 780 |
-
v_lhd=v,
|
| 781 |
-
attention_mask=attn_mask,
|
| 782 |
-
dropout=p_drop,
|
| 783 |
-
)
|
| 784 |
-
else:
|
| 785 |
-
attention_fn = ALL_ATTENTION_FUNCTIONS.get(resolved_key)
|
| 786 |
-
if attention_fn is None:
|
| 787 |
-
raise ValueError(f"Attention implementation {attn_impl} not found.")
|
| 788 |
-
|
| 789 |
-
query_states = q.transpose(0, 1).unsqueeze(0)
|
| 790 |
-
key_states = k.transpose(0, 1).unsqueeze(0)
|
| 791 |
-
value_states = v.transpose(0, 1).unsqueeze(0)
|
| 792 |
-
|
| 793 |
-
attention_kwargs: dict[str, Any] = {
|
| 794 |
-
"dropout": p_drop,
|
| 795 |
-
"scaling": self.scale,
|
| 796 |
-
"is_causal": False,
|
| 797 |
-
}
|
| 798 |
-
if cu_seqlens is not None:
|
| 799 |
-
attention_kwargs["cu_seq_lens_q"] = cu_seqlens
|
| 800 |
-
attention_kwargs["cu_seq_lens_k"] = cu_seqlens
|
| 801 |
-
if max_seqlen is not None:
|
| 802 |
-
attention_kwargs["max_length_q"] = max_seqlen
|
| 803 |
-
attention_kwargs["max_length_k"] = max_seqlen
|
| 804 |
-
|
| 805 |
-
attn_output, attn_weights = attention_fn(
|
| 806 |
-
self,
|
| 807 |
-
query_states,
|
| 808 |
-
key_states,
|
| 809 |
-
value_states,
|
| 810 |
-
attn_mask,
|
| 811 |
-
**attention_kwargs,
|
| 812 |
-
)
|
| 813 |
|
| 814 |
-
|
| 815 |
-
|
| 816 |
-
|
| 817 |
-
|
| 818 |
-
|
| 819 |
-
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
|
| 823 |
-
|
| 824 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 825 |
|
| 826 |
-
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
|
| 830 |
-
|
| 831 |
-
|
| 832 |
-
|
| 833 |
-
max_seqlen: Optional[int],
|
| 834 |
-
dropout: float,
|
| 835 |
-
) -> torch.Tensor:
|
| 836 |
-
L = q_lhd.size(0)
|
| 837 |
-
if max_seqlen is not None:
|
| 838 |
-
max_q = max_k = int(max_seqlen)
|
| 839 |
-
else:
|
| 840 |
-
max_q = max_k = self._max_from_cu(cu_seqlens, L)
|
| 841 |
-
|
| 842 |
-
if not q_lhd.is_contiguous():
|
| 843 |
-
q_lhd = q_lhd.contiguous()
|
| 844 |
-
if not k_lhd.is_contiguous():
|
| 845 |
-
k_lhd = k_lhd.contiguous()
|
| 846 |
-
if not v_lhd.is_contiguous():
|
| 847 |
-
v_lhd = v_lhd.contiguous()
|
| 848 |
-
|
| 849 |
-
out_lhd, *_ = torch.ops.aten._flash_attention_forward(
|
| 850 |
-
query=q_lhd,
|
| 851 |
-
key=k_lhd,
|
| 852 |
-
value=v_lhd,
|
| 853 |
-
cum_seq_q=cu_seqlens,
|
| 854 |
-
cum_seq_k=cu_seqlens,
|
| 855 |
-
max_q=max_q,
|
| 856 |
-
max_k=max_k,
|
| 857 |
-
dropout_p=dropout,
|
| 858 |
-
is_causal=False,
|
| 859 |
-
return_debug_mask=False,
|
| 860 |
-
scale=self.scale,
|
| 861 |
-
window_size_left=-1,
|
| 862 |
-
window_size_right=-1,
|
| 863 |
-
alibi_slopes=None,
|
| 864 |
-
)
|
| 865 |
-
return out_lhd
|
| 866 |
|
| 867 |
-
|
| 868 |
-
|
| 869 |
-
*,
|
| 870 |
-
q_lhd: torch.Tensor,
|
| 871 |
-
k_lhd: torch.Tensor,
|
| 872 |
-
v_lhd: torch.Tensor,
|
| 873 |
-
attention_mask: Optional[Union[torch.Tensor, Callable]],
|
| 874 |
-
cu_seqlens: Optional[torch.Tensor],
|
| 875 |
-
dropout: float,
|
| 876 |
-
) -> torch.Tensor:
|
| 877 |
-
L = q_lhd.size(0)
|
| 878 |
-
attn_mask = attention_mask
|
| 879 |
|
| 880 |
-
if
|
| 881 |
-
|
| 882 |
-
|
| 883 |
-
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
|
| 887 |
-
|
| 888 |
-
|
| 889 |
-
|
| 890 |
-
|
| 891 |
-
|
| 892 |
-
|
|
|
|
|
|
|
|
|
|
| 893 |
)
|
| 894 |
-
|
| 895 |
-
|
| 896 |
-
|
| 897 |
-
|
| 898 |
-
|
| 899 |
-
|
| 900 |
-
|
| 901 |
-
|
| 902 |
-
if attn_mask is not None and attn_mask.dtype != q.dtype:
|
| 903 |
-
attn_mask = attn_mask.to(q.dtype)
|
| 904 |
-
|
| 905 |
-
output = F.scaled_dot_product_attention(
|
| 906 |
-
q,
|
| 907 |
-
k,
|
| 908 |
-
v,
|
| 909 |
-
attn_mask=attn_mask,
|
| 910 |
-
dropout_p=dropout,
|
| 911 |
-
scale=self.scale,
|
| 912 |
-
is_causal=False,
|
| 913 |
)
|
| 914 |
-
return output.squeeze(0).permute(1, 0, 2).contiguous()
|
| 915 |
|
| 916 |
-
|
| 917 |
-
self,
|
| 918 |
-
*,
|
| 919 |
-
q_lhd: torch.Tensor,
|
| 920 |
-
k_lhd: torch.Tensor,
|
| 921 |
-
v_lhd: torch.Tensor,
|
| 922 |
-
attention_mask: Optional[Union[torch.Tensor, Callable]],
|
| 923 |
-
dropout: float,
|
| 924 |
-
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 925 |
-
L = q_lhd.size(0)
|
| 926 |
-
attn_mask = attention_mask
|
| 927 |
-
if callable(attn_mask):
|
| 928 |
-
cache_position = torch.arange(L, device=q_lhd.device, dtype=torch.long)
|
| 929 |
-
attn_mask = eager_mask(
|
| 930 |
-
batch_size=1,
|
| 931 |
-
cache_position=cache_position,
|
| 932 |
-
kv_length=L,
|
| 933 |
-
kv_offset=0,
|
| 934 |
-
mask_function=attn_mask,
|
| 935 |
-
attention_mask=None,
|
| 936 |
-
allow_is_bidirectional_skip=False,
|
| 937 |
-
use_vmap=False,
|
| 938 |
-
dtype=q_lhd.dtype,
|
| 939 |
-
)
|
| 940 |
-
if attn_mask is not None and attn_mask.dim() == 4:
|
| 941 |
-
attn_mask = attn_mask.squeeze(0).squeeze(0)
|
| 942 |
|
| 943 |
-
|
| 944 |
-
|
| 945 |
-
|
|
|
|
| 946 |
|
| 947 |
-
|
| 948 |
-
if
|
| 949 |
-
|
| 950 |
|
| 951 |
-
|
| 952 |
-
return attn_output_lhd, attn_weights
|
| 953 |
|
| 954 |
|
| 955 |
class IsaacVisionEncoderLayer(Siglip2EncoderLayer):
|
|
@@ -976,26 +776,24 @@ class IsaacVisionEncoderLayer(Siglip2EncoderLayer):
|
|
| 976 |
Maximum document length referenced by `cu_seqlens`. Passed to FlashAttention so it can size temporary
|
| 977 |
buffers for packed variable-length attention.
|
| 978 |
"""
|
| 979 |
-
|
| 980 |
-
|
| 981 |
-
|
| 982 |
-
|
| 983 |
-
)
|
| 984 |
-
|
| 985 |
-
attention_mask = ensure_document_attention_mask(
|
| 986 |
-
attention_mask,
|
| 987 |
-
cu_seqlens,
|
| 988 |
-
hidden_states.size(1),
|
| 989 |
-
hidden_states.dtype,
|
| 990 |
-
hidden_states.device,
|
| 991 |
-
)
|
| 992 |
-
|
| 993 |
-
return super().forward(
|
| 994 |
hidden_states,
|
| 995 |
attention_mask=attention_mask,
|
| 996 |
-
|
|
|
|
| 997 |
**kwargs,
|
| 998 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 999 |
|
| 1000 |
|
| 1001 |
class IsaacVisionEncoder(Siglip2Encoder):
|
|
@@ -1005,47 +803,22 @@ class IsaacVisionEncoder(Siglip2Encoder):
|
|
| 1005 |
super().__init__(config)
|
| 1006 |
self.layers = nn.ModuleList([IsaacVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
|
| 1007 |
|
| 1008 |
-
def __variable_length_context(self, cu_seqlens, max_seqlen) -> None:
|
| 1009 |
-
if cu_seqlens is None and max_seqlen is None:
|
| 1010 |
-
return
|
| 1011 |
-
|
| 1012 |
-
for layer in self.layers:
|
| 1013 |
-
if isinstance(layer, IsaacVisionEncoderLayer):
|
| 1014 |
-
layer.self_attn._variable_length_context(
|
| 1015 |
-
cu_seqlens=cu_seqlens,
|
| 1016 |
-
max_seqlen=max_seqlen,
|
| 1017 |
-
)
|
| 1018 |
-
|
| 1019 |
@can_return_tuple
|
|
|
|
| 1020 |
def forward(
|
| 1021 |
self,
|
| 1022 |
inputs_embeds,
|
| 1023 |
attention_mask: Optional[torch.Tensor] = None,
|
| 1024 |
-
cu_seqlens: Optional[torch.Tensor] = None,
|
| 1025 |
-
max_seqlen: Optional[int] = None,
|
| 1026 |
-
output_attentions: Optional[bool] = None,
|
| 1027 |
-
output_hidden_states: Optional[bool] = None,
|
| 1028 |
-
return_dict: Optional[bool] = None,
|
| 1029 |
**kwargs: Unpack[TransformersKwargs],
|
| 1030 |
):
|
| 1031 |
-
|
| 1032 |
-
|
| 1033 |
-
|
| 1034 |
-
|
| 1035 |
-
|
| 1036 |
-
|
| 1037 |
-
|
| 1038 |
-
|
| 1039 |
-
)
|
| 1040 |
-
|
| 1041 |
-
return super().forward(
|
| 1042 |
-
inputs_embeds,
|
| 1043 |
-
attention_mask=attention_mask,
|
| 1044 |
-
output_attentions=output_attentions,
|
| 1045 |
-
output_hidden_states=output_hidden_states,
|
| 1046 |
-
return_dict=return_dict,
|
| 1047 |
-
**kwargs,
|
| 1048 |
-
)
|
| 1049 |
|
| 1050 |
|
| 1051 |
def create_pixel_shuffle_index_map(
|
|
@@ -1141,15 +914,15 @@ def pixel_shuffle_varlen(
|
|
| 1141 |
Raises:
|
| 1142 |
ValueError: If more than one batch item is provided.
|
| 1143 |
"""
|
| 1144 |
-
|
| 1145 |
-
if
|
| 1146 |
if x.size(0) != 1:
|
| 1147 |
raise AssertionError("Packed sequence is expected to have batch_size == 1")
|
| 1148 |
-
|
| 1149 |
else:
|
| 1150 |
-
|
| 1151 |
|
| 1152 |
-
embed_dim =
|
| 1153 |
scale_factor = int(scale_factor)
|
| 1154 |
|
| 1155 |
# Calculate seq_sizes from token_grids
|
|
@@ -1160,22 +933,24 @@ def pixel_shuffle_varlen(
|
|
| 1160 |
seq_sizes=seq_sizes,
|
| 1161 |
token_grids=token_grids,
|
| 1162 |
scale_factor=scale_factor,
|
| 1163 |
-
device=
|
| 1164 |
) # (new_seq, scale_factor**2)
|
| 1165 |
|
| 1166 |
# Gather → (new_seq, scale_factor**2, embed_dim)
|
| 1167 |
-
gathered =
|
| 1168 |
|
| 1169 |
# Merge the scale_factor**2 group dimension into channels to finish the shuffle
|
| 1170 |
out = gathered.reshape(gathered.size(0), embed_dim * scale_factor * scale_factor)
|
| 1171 |
|
| 1172 |
# Restore batch dimension if needed
|
| 1173 |
-
if
|
| 1174 |
out = out.unsqueeze(0)
|
| 1175 |
return out
|
| 1176 |
|
| 1177 |
|
| 1178 |
class IsaacVisionTransformer(nn.Module):
|
|
|
|
|
|
|
| 1179 |
def __init__(self, config: IsaacVisionConfig):
|
| 1180 |
super().__init__()
|
| 1181 |
self.config = config
|
|
@@ -1197,14 +972,14 @@ class IsaacVisionTransformer(nn.Module):
|
|
| 1197 |
# Generate cumulative sequence lengths for variable-length attention
|
| 1198 |
cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32, device=hidden_states.device)
|
| 1199 |
cu_seqlens[1:] = seq_sizes.cumsum(0)
|
| 1200 |
-
|
|
|
|
| 1201 |
|
| 1202 |
# Pass through encoder with variable-length attention parameters
|
| 1203 |
encoder_outputs = self.encoder(
|
| 1204 |
inputs_embeds=hidden_states,
|
|
|
|
| 1205 |
cu_seqlens=cu_seqlens,
|
| 1206 |
-
max_seqlen=max_seqlen,
|
| 1207 |
-
return_dict=True,
|
| 1208 |
)
|
| 1209 |
hidden_states = encoder_outputs.last_hidden_state
|
| 1210 |
|
|
@@ -1223,20 +998,35 @@ class IsaacVisionTransformer(nn.Module):
|
|
| 1223 |
return hidden_states
|
| 1224 |
|
| 1225 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1226 |
class IsaacVisionEmbedding(nn.Module):
|
| 1227 |
"""Vision embedding wrapper exposing tower and projector."""
|
| 1228 |
|
|
|
|
|
|
|
| 1229 |
def __init__(self, config: IsaacConfig):
|
| 1230 |
super().__init__()
|
| 1231 |
vision_cfg = config.vision_config
|
| 1232 |
-
hidden_dim = vision_cfg.hidden_size * (vision_cfg.pixel_shuffle_scale_factor**2)
|
| 1233 |
|
| 1234 |
self.vision_tower = IsaacVisionTransformer(vision_cfg)
|
| 1235 |
-
self.multimodal_projector =
|
| 1236 |
-
nn.Linear(hidden_dim, 4 * hidden_dim, bias=False),
|
| 1237 |
-
nn.SiLU(),
|
| 1238 |
-
nn.Linear(4 * hidden_dim, config.hidden_size, bias=False),
|
| 1239 |
-
)
|
| 1240 |
|
| 1241 |
def forward(self, vision_tokens: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
|
| 1242 |
hidden_states = self.vision_tower(vision_tokens)
|
|
@@ -1333,31 +1123,6 @@ def get_image_size_for_max_num_patches(
|
|
| 1333 |
return target_height, target_width
|
| 1334 |
|
| 1335 |
|
| 1336 |
-
def patchify_vision(image: torch.Tensor, patch_size: int) -> torch.Tensor:
|
| 1337 |
-
r"""Convert normalized images into flattened ViT-style patches.
|
| 1338 |
-
|
| 1339 |
-
Args:
|
| 1340 |
-
image (`torch.Tensor`):
|
| 1341 |
-
Tensor of shape `(num_images, height, width, channels)`.
|
| 1342 |
-
patch_size (`int`):
|
| 1343 |
-
Edge length of the square patches
|
| 1344 |
-
|
| 1345 |
-
Returns:
|
| 1346 |
-
`torch.Tensor`:
|
| 1347 |
-
Patch tensor where each position stores the flattened pixels belonging to that patch.
|
| 1348 |
-
|
| 1349 |
-
Raises:
|
| 1350 |
-
ValueError: If `height` or `width` is not divisible by `patch_size`.
|
| 1351 |
-
"""
|
| 1352 |
-
num_images, height, width, channels = image.shape
|
| 1353 |
-
if height % patch_size or width % patch_size:
|
| 1354 |
-
raise ValueError(f"Dimensions of images {image.shape} are not divisible by patch_size={patch_size}.")
|
| 1355 |
-
patches = image.reshape(num_images, height // patch_size, patch_size, width // patch_size, patch_size, channels)
|
| 1356 |
-
patches = patches.permute(0, 1, 3, 2, 4, 5)
|
| 1357 |
-
patches = patches.reshape(num_images, height // patch_size, width // patch_size, channels * patch_size * patch_size)
|
| 1358 |
-
return patches
|
| 1359 |
-
|
| 1360 |
-
|
| 1361 |
class IsaacConfig(PretrainedConfig):
|
| 1362 |
"""Configuration class for Isaac multimodal model.
|
| 1363 |
|
|
@@ -1378,37 +1143,25 @@ class IsaacConfig(PretrainedConfig):
|
|
| 1378 |
vision_token: str = "<image>",
|
| 1379 |
**kwargs,
|
| 1380 |
):
|
| 1381 |
-
|
| 1382 |
-
resolved_text_config = kwargs.pop("text_config", text_config)
|
| 1383 |
-
if isinstance(resolved_text_config, Qwen3Config):
|
| 1384 |
-
text_config_kwargs = copy.deepcopy(resolved_text_config.to_dict())
|
| 1385 |
-
elif isinstance(resolved_text_config, dict):
|
| 1386 |
-
text_config_kwargs = copy.deepcopy(resolved_text_config)
|
| 1387 |
-
elif resolved_text_config is None:
|
| 1388 |
-
text_config_kwargs = {}
|
| 1389 |
-
else:
|
| 1390 |
-
raise TypeError("`text_config` must be a mapping or `Qwen3Config` instance when provided.")
|
| 1391 |
|
| 1392 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1393 |
|
| 1394 |
-
|
| 1395 |
-
|
| 1396 |
-
|
| 1397 |
-
if rope_theta_override is None:
|
| 1398 |
-
rope_theta_override = getattr(Qwen3Config(), "rope_theta", 10000.0)
|
| 1399 |
-
self.text_config.rope_theta = rope_theta_override
|
| 1400 |
|
| 1401 |
super().__init__(**kwargs)
|
| 1402 |
|
| 1403 |
-
|
| 1404 |
-
|
| 1405 |
-
else:
|
| 1406 |
-
self.text_config.rope_scaling = self._rope_scaling
|
| 1407 |
|
| 1408 |
-
#
|
| 1409 |
-
self._rope_parameters = self._rope_scaling
|
| 1410 |
-
|
| 1411 |
-
# Mirror frequently accessed Qwen3 attributes at the composite config level for BC.
|
| 1412 |
self.vocab_size = self.text_config.vocab_size
|
| 1413 |
self.hidden_size = self.text_config.hidden_size
|
| 1414 |
self.num_hidden_layers = self.text_config.num_hidden_layers
|
|
@@ -1416,10 +1169,7 @@ class IsaacConfig(PretrainedConfig):
|
|
| 1416 |
self.head_dim = self.text_config.head_dim
|
| 1417 |
self.hidden_act = self.text_config.hidden_act
|
| 1418 |
self.use_cache = self.text_config.use_cache
|
| 1419 |
-
self.rope_theta = self.
|
| 1420 |
-
|
| 1421 |
-
# Validate rotary parameters now that they have been mirrored locally.
|
| 1422 |
-
rope_config_validation(self)
|
| 1423 |
|
| 1424 |
self.layer_types = getattr(self.text_config, "layer_types", None)
|
| 1425 |
layer_type_validation(self.layer_types, self.num_hidden_layers)
|
|
@@ -1432,6 +1182,15 @@ class IsaacConfig(PretrainedConfig):
|
|
| 1432 |
elif vision_config is None:
|
| 1433 |
self.vision_config = self.sub_configs["vision_config"]()
|
| 1434 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1435 |
# Vision normalization parameters
|
| 1436 |
self.vision_rescale_factor = float(vision_rescale_factor)
|
| 1437 |
|
|
@@ -1439,33 +1198,6 @@ class IsaacConfig(PretrainedConfig):
|
|
| 1439 |
self.max_sequence_length = max_sequence_length
|
| 1440 |
self.vision_token = vision_token
|
| 1441 |
|
| 1442 |
-
@property
|
| 1443 |
-
def rope_scaling(self):
|
| 1444 |
-
if hasattr(self, "text_config") and self.text_config is not None:
|
| 1445 |
-
return getattr(self.text_config, "rope_scaling", None)
|
| 1446 |
-
return self._rope_scaling
|
| 1447 |
-
|
| 1448 |
-
@rope_scaling.setter
|
| 1449 |
-
def rope_scaling(self, value):
|
| 1450 |
-
self._rope_scaling = value
|
| 1451 |
-
if hasattr(self, "text_config") and self.text_config is not None:
|
| 1452 |
-
self.text_config.rope_scaling = value
|
| 1453 |
-
|
| 1454 |
-
@property
|
| 1455 |
-
def rope_parameters(self) -> dict[str, Any] | None:
|
| 1456 |
-
"""Alias introduced upstream for rope scaling dictionaries."""
|
| 1457 |
-
value = self._rope_parameters
|
| 1458 |
-
if value is None:
|
| 1459 |
-
value = self.rope_scaling
|
| 1460 |
-
if value is None:
|
| 1461 |
-
return {"rope_type": "default"}
|
| 1462 |
-
return value
|
| 1463 |
-
|
| 1464 |
-
@rope_parameters.setter
|
| 1465 |
-
def rope_parameters(self, value: dict[str, Any] | None) -> None:
|
| 1466 |
-
self._rope_parameters = value
|
| 1467 |
-
self.rope_scaling = value
|
| 1468 |
-
|
| 1469 |
def to_dict(self):
|
| 1470 |
output = super().to_dict()
|
| 1471 |
# Ensure nested configs round-trip through dict serialization
|
|
@@ -1527,7 +1259,7 @@ def create_text_event(tokenizer: AutoTokenizer, text: str, time: float = 0.0) ->
|
|
| 1527 |
class IsaacProcessor(ProcessorMixin):
|
| 1528 |
attributes = ["image_processor", "tokenizer"]
|
| 1529 |
image_processor_class = ("IsaacImageProcessorFast",)
|
| 1530 |
-
tokenizer_class = ("Qwen2Tokenizer",
|
| 1531 |
|
| 1532 |
def __init__(
|
| 1533 |
self,
|
|
@@ -1570,7 +1302,7 @@ class IsaacProcessor(ProcessorMixin):
|
|
| 1570 |
def build_event_stream_simple(
|
| 1571 |
self,
|
| 1572 |
text: str,
|
| 1573 |
-
images: Optional[list[
|
| 1574 |
) -> Stream:
|
| 1575 |
events = []
|
| 1576 |
# Process text and images
|
|
@@ -1616,7 +1348,7 @@ class IsaacProcessor(ProcessorMixin):
|
|
| 1616 |
def __call__(
|
| 1617 |
self,
|
| 1618 |
text: Union[str, list[str]],
|
| 1619 |
-
images: Optional[Union[
|
| 1620 |
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
|
| 1621 |
**kwargs,
|
| 1622 |
) -> BatchFeature:
|
|
@@ -1637,7 +1369,7 @@ class IsaacProcessor(ProcessorMixin):
|
|
| 1637 |
texts = text
|
| 1638 |
|
| 1639 |
if images is not None:
|
| 1640 |
-
if isinstance(images,
|
| 1641 |
images_list = [images]
|
| 1642 |
else:
|
| 1643 |
images_list = images
|
|
@@ -1707,12 +1439,10 @@ def compute_position_ids_input_ids(input_ids: torch.Tensor) -> torch.Tensor:
|
|
| 1707 |
return position_ids
|
| 1708 |
|
| 1709 |
|
| 1710 |
-
class IsaacRotaryEmbedding(
|
| 1711 |
EXTRA_ROPE_KEYS = {"mrope_section", "mrope_interleaved"}
|
| 1712 |
|
| 1713 |
def __init__(self, config: IsaacConfig, device=None):
|
| 1714 |
-
super().__init__()
|
| 1715 |
-
|
| 1716 |
rope_source_cfg = config.get_text_config() if hasattr(config, "get_text_config") else config
|
| 1717 |
rope_scaling = getattr(rope_source_cfg, "rope_scaling", None) or {}
|
| 1718 |
|
|
@@ -1721,9 +1451,9 @@ class IsaacRotaryEmbedding(nn.Module):
|
|
| 1721 |
config_for_rope.rope_scaling = sanitized_scaling if sanitized_scaling else None
|
| 1722 |
|
| 1723 |
init_device = device if device is not None and getattr(device, "type", None) != "meta" else None
|
| 1724 |
-
|
| 1725 |
|
| 1726 |
-
rotary_half_dim = self.
|
| 1727 |
self.mrope_section = self._resolve_mrope_section(rope_scaling.get("mrope_section"), rotary_half_dim)
|
| 1728 |
self.hidden_size = getattr(rope_source_cfg, "hidden_size", None) or config.hidden_size
|
| 1729 |
|
|
@@ -1749,10 +1479,6 @@ class IsaacRotaryEmbedding(nn.Module):
|
|
| 1749 |
chunks = tensor.split(split_sections, dim=-1)
|
| 1750 |
return torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
|
| 1751 |
|
| 1752 |
-
@property
|
| 1753 |
-
def inv_freq(self) -> torch.Tensor:
|
| 1754 |
-
return self._qwen_rotary.inv_freq
|
| 1755 |
-
|
| 1756 |
def forward(
|
| 1757 |
self,
|
| 1758 |
position_ids: torch.Tensor,
|
|
@@ -1776,14 +1502,15 @@ class IsaacRotaryEmbedding(nn.Module):
|
|
| 1776 |
|
| 1777 |
with torch.no_grad():
|
| 1778 |
pos = position_ids.clone()
|
| 1779 |
-
|
|
|
|
| 1780 |
if not_spatial.any():
|
| 1781 |
data_1d = pos[not_spatial][..., 0].unsqueeze(-1)
|
| 1782 |
pos[not_spatial] = data_1d.expand(-1, pos.shape[-1])
|
| 1783 |
|
| 1784 |
pos_axes = pos.permute(2, 0, 1).contiguous()
|
| 1785 |
|
| 1786 |
-
cos_axes, sin_axes =
|
| 1787 |
|
| 1788 |
cos_axes = cos_axes.to(hidden_states.dtype)
|
| 1789 |
sin_axes = sin_axes.to(hidden_states.dtype)
|
|
@@ -1796,6 +1523,11 @@ class IsaacRotaryEmbedding(nn.Module):
|
|
| 1796 |
|
| 1797 |
class IsaacModel(Qwen3PreTrainedModel):
|
| 1798 |
supports_gradient_checkpointing = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1799 |
|
| 1800 |
def __init__(self, config: IsaacConfig):
|
| 1801 |
Qwen3PreTrainedModel.__init__(self, config)
|
|
@@ -1812,6 +1544,7 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 1812 |
raise ValueError("IsaacConfig should always have vision_config")
|
| 1813 |
|
| 1814 |
self.vision_embedding = IsaacVisionEmbedding(config)
|
|
|
|
| 1815 |
|
| 1816 |
# Dispatch table for TensorStream balanced embedding (text + vision)
|
| 1817 |
self.embed_fns = {
|
|
@@ -1824,11 +1557,24 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 1824 |
self.vision_rescale_factor = config.vision_rescale_factor
|
| 1825 |
self.vision_token = config.vision_token
|
| 1826 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1827 |
def get_input_embeddings(self) -> nn.Module:
|
| 1828 |
return self.text_model.get_input_embeddings()
|
| 1829 |
|
| 1830 |
def set_input_embeddings(self, value: nn.Module) -> None:
|
| 1831 |
self.text_model.set_input_embeddings(value)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1832 |
|
| 1833 |
@property
|
| 1834 |
def embed_tokens(self) -> nn.Module:
|
|
@@ -1839,12 +1585,16 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 1839 |
self.text_model.embed_tokens = value
|
| 1840 |
|
| 1841 |
@property
|
| 1842 |
-
def
|
| 1843 |
-
return self.
|
| 1844 |
|
| 1845 |
@property
|
| 1846 |
-
def
|
| 1847 |
-
return self.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1848 |
|
| 1849 |
def embed_text_tokens(self, token_ids: torch.Tensor) -> torch.Tensor:
|
| 1850 |
"""Embed text tokens, squeezing singleton dimensions."""
|
|
@@ -1893,6 +1643,62 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 1893 |
h = embedded_ts.compact() # (B, T, D)
|
| 1894 |
return h
|
| 1895 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1896 |
@auto_docstring
|
| 1897 |
@check_model_inputs
|
| 1898 |
def forward(
|
|
@@ -1905,10 +1711,8 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 1905 |
past_key_values: Optional[list[torch.FloatTensor]] = None,
|
| 1906 |
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1907 |
use_cache: Optional[bool] = None,
|
| 1908 |
-
output_hidden_states: Optional[bool] = None,
|
| 1909 |
-
return_dict: Optional[bool] = None,
|
| 1910 |
cache_position: Optional[torch.LongTensor] = None,
|
| 1911 |
-
**kwargs,
|
| 1912 |
) -> tuple | BaseModelOutputWithPast:
|
| 1913 |
"""
|
| 1914 |
Forward pass with MRoPE position embeddings.
|
|
@@ -1926,89 +1730,81 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 1926 |
omitted.
|
| 1927 |
"""
|
| 1928 |
|
|
|
|
|
|
|
| 1929 |
# Get inputs
|
| 1930 |
if tensor_stream is not None and inputs_embeds is not None:
|
| 1931 |
raise ValueError("You cannot specify both tensor_stream and inputs_embeds")
|
| 1932 |
-
|
| 1933 |
-
# Embed TensorStream directly
|
| 1934 |
-
inputs_embeds = self.embed_stream(tensor_stream)
|
| 1935 |
-
# Create modality tensor if not provided
|
| 1936 |
-
if modality_tensor is None:
|
| 1937 |
-
modality_tensor = modality_mask(tensor_stream)
|
| 1938 |
-
elif input_ids is not None and inputs_embeds is not None:
|
| 1939 |
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1940 |
elif input_ids is not None:
|
| 1941 |
inputs_embeds = self.text_model.embed_tokens(input_ids)
|
| 1942 |
-
# Create text modality tensor if not provided
|
| 1943 |
-
if modality_tensor is None:
|
| 1944 |
-
batch_size, seq_length = input_ids.shape
|
| 1945 |
-
modality_tensor = torch.full(
|
| 1946 |
-
(batch_size, seq_length), TextType.text.value, device=input_ids.device, dtype=torch.long
|
| 1947 |
-
)
|
| 1948 |
elif inputs_embeds is None:
|
| 1949 |
raise ValueError("You have to specify either tensor_stream, input_ids or inputs_embeds")
|
| 1950 |
|
|
|
|
|
|
|
| 1951 |
# Ensure cache exists when requested
|
| 1952 |
if use_cache and past_key_values is None:
|
| 1953 |
cache_config = self.config.get_text_config() if hasattr(self.config, "get_text_config") else self.config
|
| 1954 |
past_key_values = DynamicCache(config=cache_config)
|
| 1955 |
|
| 1956 |
-
if cache_position is None
|
| 1957 |
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 1958 |
-
cache_position = torch.arange(
|
| 1959 |
-
past_seen_tokens,
|
| 1960 |
-
past_seen_tokens + inputs_embeds.shape[1],
|
| 1961 |
-
device=inputs_embeds.device,
|
| 1962 |
-
)
|
| 1963 |
|
| 1964 |
-
|
| 1965 |
-
|
| 1966 |
-
if tensor_stream is not None:
|
| 1967 |
-
position_ids = compute_mrope_pos_tensor(tensor_stream) # (B,L,3)
|
| 1968 |
-
else:
|
| 1969 |
-
position_ids = compute_position_ids_input_ids(input_ids)
|
| 1970 |
|
| 1971 |
-
|
| 1972 |
-
|
| 1973 |
-
|
| 1974 |
-
|
| 1975 |
-
|
|
|
|
| 1976 |
)
|
| 1977 |
-
cos = cos.to(inputs_embeds.dtype)
|
| 1978 |
-
sin = sin.to(inputs_embeds.dtype)
|
| 1979 |
|
| 1980 |
# Prepare attention mask
|
| 1981 |
-
|
| 1982 |
if not isinstance(attention_mask, dict):
|
| 1983 |
-
|
| 1984 |
-
|
| 1985 |
-
|
| 1986 |
-
|
| 1987 |
-
|
| 1988 |
-
|
| 1989 |
-
|
| 1990 |
-
|
| 1991 |
-
|
|
|
|
| 1992 |
|
| 1993 |
# Initialize hidden states
|
| 1994 |
hidden_states = inputs_embeds
|
|
|
|
| 1995 |
|
| 1996 |
for decoder_layer in self.text_model.layers:
|
| 1997 |
layer_attention_mask = (
|
| 1998 |
-
attention_mask[decoder_layer.attention_type] if
|
| 1999 |
)
|
| 2000 |
layer_outputs = decoder_layer(
|
| 2001 |
hidden_states,
|
| 2002 |
attention_mask=layer_attention_mask,
|
| 2003 |
-
position_ids=
|
| 2004 |
past_key_values=past_key_values,
|
| 2005 |
use_cache=use_cache,
|
| 2006 |
cache_position=cache_position,
|
| 2007 |
position_embeddings=(cos, sin),
|
|
|
|
| 2008 |
**kwargs,
|
| 2009 |
)
|
| 2010 |
|
| 2011 |
-
|
|
|
|
|
|
|
|
|
|
| 2012 |
|
| 2013 |
# Final layer norm
|
| 2014 |
hidden_states = self.text_model.norm(hidden_states)
|
|
@@ -2016,6 +1812,8 @@ class IsaacModel(Qwen3PreTrainedModel):
|
|
| 2016 |
return BaseModelOutputWithPast(
|
| 2017 |
last_hidden_state=hidden_states,
|
| 2018 |
past_key_values=past_key_values,
|
|
|
|
|
|
|
| 2019 |
)
|
| 2020 |
|
| 2021 |
|
|
@@ -2023,6 +1821,9 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
|
|
| 2023 |
"""Isaac multimodal model for conditional generation."""
|
| 2024 |
|
| 2025 |
config_class = IsaacConfig
|
|
|
|
|
|
|
|
|
|
| 2026 |
|
| 2027 |
def __init__(self, config: IsaacConfig):
|
| 2028 |
super().__init__(config)
|
|
@@ -2032,39 +1833,6 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
|
|
| 2032 |
# Tracks rotary position offsets computed during a full forward pass so decode steps can reuse them.
|
| 2033 |
self.rope_deltas = None
|
| 2034 |
|
| 2035 |
-
def get_rope_index(
|
| 2036 |
-
self,
|
| 2037 |
-
input_ids: Optional[torch.Tensor],
|
| 2038 |
-
tensor_stream: Optional[TensorStream],
|
| 2039 |
-
attention_mask: Optional[torch.Tensor],
|
| 2040 |
-
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 2041 |
-
"""Compute MRoPE position ids from a TensorStream (or 1D fallback).
|
| 2042 |
-
|
| 2043 |
-
Returns (position_ids, rope_deltas). position_ids is (B,L,3) for MRoPE.
|
| 2044 |
-
rope_deltas is (B,1) used to advance positions in decode.
|
| 2045 |
-
"""
|
| 2046 |
-
# tensor_stream present: compute 3D coords
|
| 2047 |
-
if tensor_stream is None and input_ids is None:
|
| 2048 |
-
raise ValueError("`tensor_stream` or `input_ids` must be provided to compute rope indices")
|
| 2049 |
-
|
| 2050 |
-
if tensor_stream is not None:
|
| 2051 |
-
pos_3d = compute_mrope_pos_tensor(tensor_stream) # (B,L,3)
|
| 2052 |
-
else:
|
| 2053 |
-
pos_3d = compute_position_ids_input_ids(input_ids)
|
| 2054 |
-
B, L, _ = pos_3d.shape
|
| 2055 |
-
|
| 2056 |
-
# Max position per batch across the 3 planes and sequence dimension: (B,)
|
| 2057 |
-
m_per_batch = pos_3d.amax(dim=(1, 2))
|
| 2058 |
-
|
| 2059 |
-
# Sequence lengths per batch: (B,)
|
| 2060 |
-
if attention_mask is None:
|
| 2061 |
-
seq_lens = torch.full_like(m_per_batch, L)
|
| 2062 |
-
else:
|
| 2063 |
-
seq_lens = attention_mask.eq(1).sum(dim=-1).to(dtype=m_per_batch.dtype, device=m_per_batch.device)
|
| 2064 |
-
|
| 2065 |
-
rope_deltas = (m_per_batch + 1 - seq_lens).to(dtype=pos_3d.dtype).unsqueeze(1)
|
| 2066 |
-
return pos_3d, rope_deltas
|
| 2067 |
-
|
| 2068 |
def forward(
|
| 2069 |
self,
|
| 2070 |
input_ids: Optional[torch.LongTensor] = None,
|
|
@@ -2075,10 +1843,8 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
|
|
| 2075 |
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 2076 |
labels: Optional[torch.LongTensor] = None,
|
| 2077 |
use_cache: Optional[bool] = None,
|
| 2078 |
-
output_hidden_states: Optional[bool] = None,
|
| 2079 |
-
return_dict: Optional[bool] = None,
|
| 2080 |
cache_position: Optional[torch.LongTensor] = None,
|
| 2081 |
-
**kwargs,
|
| 2082 |
) -> tuple | CausalLMOutputWithPast:
|
| 2083 |
r"""
|
| 2084 |
Forward pass for conditional generation supporting both standard inputs and TensorStream.
|
|
@@ -2089,48 +1855,43 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
|
|
| 2089 |
`input_ids`.
|
| 2090 |
"""
|
| 2091 |
|
| 2092 |
-
|
|
|
|
|
|
|
| 2093 |
if tensor_stream is not None:
|
| 2094 |
input_ids = None
|
| 2095 |
if input_ids is None and inputs_embeds is None and tensor_stream is None:
|
| 2096 |
raise ValueError("Either input_ids, inputs_embeds, or tensor_stream must be provided.")
|
| 2097 |
|
| 2098 |
-
#
|
| 2099 |
-
# During decode we reuse `self.rope_deltas` computed on the initial forward pass; `rope_delta` captures how far
|
| 2100 |
-
# cached rotary phases have progressed so we can advance `position_ids` without rebuilding the TensorStream.
|
| 2101 |
if position_ids is None and tensor_stream is not None:
|
| 2102 |
position_ids, self.rope_deltas = self.get_rope_index(input_ids, tensor_stream, attention_mask)
|
| 2103 |
-
elif position_ids is None and
|
| 2104 |
-
#
|
| 2105 |
-
|
| 2106 |
-
|
| 2107 |
-
# Combine the incremental decode step (`cache_position`) with cached offsets so hidden states continue
|
| 2108 |
-
# rotating in lockstep across generation steps.
|
| 2109 |
-
rope_delta = (cache_position[0] + self.rope_deltas).to(input_ids.device)
|
| 2110 |
else:
|
| 2111 |
-
|
| 2112 |
-
|
| 2113 |
-
batch_size =
|
| 2114 |
-
|
| 2115 |
-
|
| 2116 |
|
| 2117 |
-
|
| 2118 |
-
|
| 2119 |
-
|
| 2120 |
-
|
| 2121 |
-
modality_tensor = torch.empty(batch_size, seq_len, device=position_ids.device).fill_(TextType.text.value)
|
| 2122 |
|
| 2123 |
outputs = self.model(
|
| 2124 |
input_ids=input_ids,
|
| 2125 |
tensor_stream=tensor_stream,
|
| 2126 |
attention_mask=attention_mask,
|
| 2127 |
position_ids=position_ids,
|
| 2128 |
-
modality_tensor=
|
| 2129 |
past_key_values=past_key_values,
|
| 2130 |
inputs_embeds=inputs_embeds,
|
| 2131 |
use_cache=use_cache,
|
| 2132 |
-
|
| 2133 |
-
return_dict=return_dict,
|
| 2134 |
cache_position=cache_position,
|
| 2135 |
**kwargs,
|
| 2136 |
)
|
|
@@ -2147,9 +1908,55 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
|
|
| 2147 |
logits=logits,
|
| 2148 |
past_key_values=outputs.past_key_values,
|
| 2149 |
hidden_states=outputs.hidden_states,
|
| 2150 |
-
attentions=None,
|
| 2151 |
)
|
| 2152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2153 |
def prepare_inputs_for_generation(
|
| 2154 |
self,
|
| 2155 |
input_ids: torch.LongTensor,
|
|
@@ -2196,25 +2003,25 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin):
|
|
| 2196 |
|
| 2197 |
cache_position = model_inputs.get("cache_position", cache_position)
|
| 2198 |
|
| 2199 |
-
# Handle TensorStream for
|
| 2200 |
-
|
|
|
|
| 2201 |
model_inputs["tensor_stream"] = tensor_stream
|
| 2202 |
-
|
| 2203 |
-
|
| 2204 |
-
|
| 2205 |
-
if cache_position is not None and cache_position[0] != 0:
|
| 2206 |
model_inputs["tensor_stream"] = None
|
| 2207 |
-
return model_inputs
|
| 2208 |
|
| 2209 |
-
|
| 2210 |
-
|
|
|
|
|
|
|
| 2211 |
|
|
|
|
| 2212 |
|
| 2213 |
-
|
| 2214 |
-
|
| 2215 |
-
|
| 2216 |
-
exist_ok=True,
|
| 2217 |
-
)
|
| 2218 |
|
| 2219 |
|
| 2220 |
def _compute_residual_p_frames(frames: torch.Tensor, is_p_frame: list[bool]) -> torch.Tensor:
|
|
|
|
  88 |   from collections import defaultdict
  89 |   from typing import Any, Callable, Optional, Sequence, Union
  90 |
  91 | + from PIL.Image import Image
  92 |   import torch
  93 |   import torch.nn as nn
  94 |   import torch.nn.functional as F

 133 |   from transformers.masking_utils import create_masks_for_generate, eager_mask, packed_sequence_mask_function, sdpa_mask
 134 |   from transformers.processing_utils import ImagesKwargs, ProcessorMixin, Unpack
 135 |   from transformers.utils import auto_docstring, TensorType
 136 | + from transformers.utils.generic import OutputRecorder, can_return_tuple, check_model_inputs
 137 |
 138 |   # Vision preprocessing constants
 139 |   from transformers.utils.constants import IMAGENET_STANDARD_MEAN as VISION_MEAN

 141 |   from transformers.utils.import_utils import is_torchdynamo_compiling
 142 |
 143 |   try:
 144 | +     from perceptron.tensorstream.ops import (
 145 | +         compute_mrope_pos_tensor,
 146 | +         modality_mask,
 147 | +         reconstruct_tensor_stream_from_compact_dict,
 148 | +         tensor_stream_token_view,
 149 | +     )
 150 | +     from perceptron.tensorstream.ops import (
 151 | +         slice as ts_slice,
 152 | +     )
 153 | +     from perceptron.tensorstream.tensorstream import (
 154 |           Event,
 155 |           Stream,
 156 |           TensorStream,
 159 |           create_stream,
 160 |           group_streams,
 161 |       )

 162 |   except ModuleNotFoundError as exc:  # pragma: no cover - import guard
 163 |       raise ModuleNotFoundError(
 164 |           "genesis.public.tensorstream is required for the Isaac HuggingFace integration. "
 165 |           "Ensure the TensorStream package is installed and on PYTHONPATH."
 166 |       ) from exc
 167 |
 168 | + # _ORIGINAL_ATTENTION_FUNCTIONS: dict[str, Callable[..., tuple[torch.Tensor, Optional[torch.Tensor]]]] = {}
 169 | + # for _attn_name in ("flash_attention_2", "sdpa", "eager"):
 170 | + #     if _attn_name in ALL_ATTENTION_FUNCTIONS:
 171 | + #         _ORIGINAL_ATTENTION_FUNCTIONS[_attn_name] = ALL_ATTENTION_FUNCTIONS[_attn_name]
 172 |
 173 |
 174 |   class IsaacVisionConfig(Siglip2VisionConfig):

 217 |       # Add our custom fields
 218 |       self.pixel_shuffle_scale_factor = pixel_shuffle_scale_factor
 219 |
 220 | +     # Ensure a sensible default attention backend
 221 | +     if getattr(self, "_attn_implementation", None) is None:
 222 | +         self._attn_implementation = "sdpa"
 223 |
 224 | +
 225 | + class IsaacImageProcessorFastKwargs(ImagesKwargs, total=False):
 226 |       patch_size: Optional[int]
 227 |       max_num_patches: Optional[int]
 228 |       min_num_patches: Optional[int]

 236 |
 237 |       resample = PILImageResampling.BILINEAR
 238 |       model_input_names = ["patches", "token_grids"]
 239 | +     valid_kwargs = IsaacImageProcessorFastKwargs
 240 |       unused_kwargs = ["size", "do_center_crop", "crop_size"]
 241 |
 242 |       do_resize = True
 243 |       do_center_crop = False
 244 |       patch_size: Optional[int] = 16
 245 |       max_num_patches: Optional[int] = 256
 246 |       min_num_patches: Optional[int] = None
 247 |       pixel_shuffle_scale: Optional[int] = 1
 248 |       do_pad = False
 249 |       do_rescale = True
 250 |       do_normalize = True
 251 |       image_mean = list(VISION_MEAN)
 252 |       image_std = list(VISION_STD)
 253 |       do_convert_rgb = True
 254 |       disable_grouping = False
 255 |       size_divisor: Optional[int] = None
 256 |
 257 |       def __init__(
 258 |           self,
 259 | +         **kwargs: Unpack[IsaacImageProcessorFastKwargs],
 260 |       ) -> None:
 261 |           super().__init__(**kwargs)
 262 |

 392 |           nhwc_images = image_batch.permute(0, 2, 3, 1)
 393 |           nhwc_images = _compute_residual_p_frames(nhwc_images, is_p_frame=[False] * batch_size)
 394 |
 395 | +         patches = torch_extract_patches(nhwc_images.permute(0, 3, 1, 2), patch_size, patch_size)
 396 |           _, height_tokens, width_tokens, _ = patches.shape
 397 |
 398 |           token_grid = (

 481 |       return packed_sequence_mask_function(packed_sequence_mask)
 482 |
 483 |
 484 | + def create_document_attention_mask(
 485 | +     config: PretrainedConfig,
 486 | +     input_embeds: torch.Tensor,
 487 |       cu_seqlens: Optional[torch.Tensor],
 488 | + ) -> Optional[Union[torch.Tensor, Any]]:
 489 | +     """Materialize a backend-specific block-diagonal attention mask.
 490 |
 491 | +     This uses the standard `masking_utils` mask interface (same mechanism as Llama4),
 492 | +     so the returned object matches the selected attention backend (e.g. SDPA bool mask,
 493 | +     eager additive mask, or flex `BlockMask`).
 494 | +     """
 495 |
 496 | +     mask_function = document_mask_function_from_cu_seqlens(cu_seqlens)
 497 | +     if mask_function is None:
 498 |           return None
 499 |
 500 | +     seq_len = input_embeds.shape[1]
 501 | +     cache_position = torch.arange(seq_len, device=input_embeds.device, dtype=torch.long)
 502 | +
 503 | +     mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
 504 | +     return mask_interface(
 505 | +         batch_size=input_embeds.shape[0],
 506 | +         cache_position=cache_position,
 507 | +         kv_length=seq_len,
 508 | +         kv_offset=0,
 509 | +         mask_function=mask_function,
 510 | +         attention_mask=None,
 511 | +         allow_is_causal_skip=False,
 512 | +         allow_is_bidirectional_skip=False,
 513 | +         dtype=input_embeds.dtype,
 514 | +         config=config,
 515 | +         use_vmap=False,
 516 | +     )
 517 |
 518 |
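For intuition, the mask requested above is block-diagonal over the packed documents described by cu_seqlens: a position may only attend to positions inside its own document. A minimal standalone sketch of that pattern (it does not use the masking_utils interface; names are local to the sketch):

import torch

def block_diagonal_bool_mask(cu_seqlens: torch.Tensor, seq_len: int) -> torch.Tensor:
    # cu_seqlens = [0, len_0, len_0 + len_1, ...]; two positions may attend to each
    # other only when they fall into the same document segment.
    positions = torch.arange(seq_len)
    doc_ids = torch.bucketize(positions, cu_seqlens[1:].to(torch.long), right=True)
    return doc_ids[:, None] == doc_ids[None, :]

mask = block_diagonal_bool_mask(torch.tensor([0, 3, 5]), 5)
# True inside the 3x3 block for document 0 and the 2x2 block for document 1.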
class IsaacVisionEmbeddings(nn.Module):
|
| 520 |
"""Adapter around SigLIP2 vision embeddings that consumes packed patch sequences."""
|
| 521 |
|
|
|
|
| 522 |
def __init__(self, config: IsaacVisionConfig):
|
| 523 |
super().__init__()
|
| 524 |
self.config = config
|
|
|
|
| 539 |
if packed_pixel_values is None:
|
| 540 |
return seq_patches.new_zeros((0, self.embed_dim))
|
| 541 |
|
|
|
|
| 542 |
target_dtype = self.patch_embedding.weight.dtype
|
| 543 |
patch_embeds = self.patch_embedding(packed_pixel_values.to(dtype=target_dtype))
|
| 544 |
|
|
|
|
| 554 |
embeddings = patch_embeds + resized_positional_embeddings
|
| 555 |
return self._unpack_from_batch(embeddings, seq_lengths)
|
| 556 |
|
|
|
|
| 557 |
@staticmethod
|
| 558 |
def resize_positional_embeddings(
|
| 559 |
positional_embeddings: torch.Tensor,
|
|
|
|
| 667 |
class IsaacVisionAttention(Siglip2Attention):
|
| 668 |
"""Custom attention that supports variable-length sequences with flash attention."""
|
| 669 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 670 |
def forward(
|
| 671 |
self,
|
| 672 |
hidden_states: torch.Tensor,
|
| 673 |
attention_mask: Optional[torch.Tensor] = None,
|
|
|
|
|
|
|
| 674 |
output_attentions: bool = False,
|
|
|
|
| 675 |
cu_seqlens: Optional[torch.Tensor] = None,
|
| 676 |
max_seqlen: Optional[int] = None,
|
| 677 |
**kwargs,
|
| 678 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 679 |
kwargs.pop("output_hidden_states", None)
|
| 680 |
kwargs.pop("return_dict", None)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 681 |
|
| 682 |
+
batch_size, seq_length, embed_dim = hidden_states.shape
|
| 683 |
+
queries = self.q_proj(hidden_states)
|
| 684 |
+
keys = self.k_proj(hidden_states)
|
| 685 |
+
values = self.v_proj(hidden_states)
|
| 686 |
+
|
| 687 |
+
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
|
| 688 |
+
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
|
| 689 |
+
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
|
| 690 |
+
|
| 691 |
+
attn_impl = self.config._attn_implementation
|
| 692 |
+
attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"]
|
| 693 |
+
if attn_impl != "sdpa":
|
| 694 |
+
attention_interface = ALL_ATTENTION_FUNCTIONS[attn_impl]
|
| 695 |
+
|
| 696 |
+
dropout = 0.0 if not self.training else self.dropout
|
| 697 |
+
attention_kwargs: dict[str, Any] = {
|
| 698 |
+
"is_causal": False,
|
| 699 |
+
"scaling": self.scale,
|
| 700 |
+
"dropout": dropout,
|
| 701 |
+
}
|
| 702 |
|
| 703 |
+
supports_varlen = cu_seqlens is not None and attn_impl in {
|
| 704 |
+
"flash_attention_2",
|
| 705 |
+
"flash_attention_3",
|
| 706 |
+
"flex_attention",
|
| 707 |
+
"paged|flash_attention_2",
|
| 708 |
+
"paged|flash_attention_3",
|
| 709 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 710 |
|
| 711 |
+
if output_attentions and attn_impl == "eager":
|
| 712 |
+
attention_kwargs["output_attentions"] = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 713 |
|
| 714 |
+
if supports_varlen:
|
| 715 |
+
if max_seqlen is not None:
|
| 716 |
+
max_q = max_k = int(max_seqlen)
|
| 717 |
+
elif cu_seqlens.numel() >= 2:
|
| 718 |
+
lengths = cu_seqlens[1:] - cu_seqlens[:-1]
|
| 719 |
+
max_q = max_k = lengths.max() if lengths.numel() > 0 else seq_length
|
| 720 |
+
else:
|
| 721 |
+
max_q = max_k = seq_length
|
| 722 |
+
|
| 723 |
+
attention_kwargs.update(
|
| 724 |
+
{
|
| 725 |
+
"cu_seq_lens_q": cu_seqlens,
|
| 726 |
+
"cu_seq_lens_k": cu_seqlens,
|
| 727 |
+
"max_length_q": max_q,
|
| 728 |
+
"max_length_k": max_k,
|
| 729 |
+
}
|
| 730 |
)
|
| 731 |
+
|
| 732 |
+
attn_output, attn_weights = attention_interface(
|
| 733 |
+
self,
|
| 734 |
+
queries,
|
| 735 |
+
keys,
|
| 736 |
+
values,
|
| 737 |
+
attention_mask,
|
| 738 |
+
**attention_kwargs,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 739 |
)
|
|
|
|
| 740 |
|
| 741 |
+
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 742 |
|
| 743 |
+
# Align projection inputs with parameter dtype to avoid mixed-dtype matmul errors
|
| 744 |
+
out_proj_dtype = self.out_proj.weight.dtype
|
| 745 |
+
if attn_output.dtype != out_proj_dtype:
|
| 746 |
+
attn_output = attn_output.to(out_proj_dtype)
|
| 747 |
|
| 748 |
+
attn_output = self.out_proj(attn_output)
|
| 749 |
+
if attn_output.dtype != hidden_states.dtype:
|
| 750 |
+
attn_output = attn_output.to(hidden_states.dtype)
|
| 751 |
|
| 752 |
+
return attn_output, attn_weights
|
|
|
|
| 753 |
|
| 754 |
|
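The varlen branch above recovers the longest packed document from cu_seqlens when max_seqlen is not passed in. A quick standalone check of that arithmetic on toy values (a sketch, independent of the attention call itself):

import torch

cu_seqlens = torch.tensor([0, 4, 9, 11], dtype=torch.int32)  # three packed documents
lengths = cu_seqlens[1:] - cu_seqlens[:-1]                    # tensor([4, 5, 2], dtype=torch.int32)
max_seqlen = int(lengths.max())                               # 5, fed to max_length_q / max_length_k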
 755 |   class IsaacVisionEncoderLayer(Siglip2EncoderLayer):

 776 |               Maximum document length referenced by `cu_seqlens`. Passed to FlashAttention so it can size temporary
 777 |               buffers for packed variable-length attention.
 778 |           """
 779 | +         # Run attention directly so variable-length metadata reaches FlashAttention.
 780 | +         residual = hidden_states
 781 | +         hidden_states = self.layer_norm1(hidden_states)
 782 | +         attn_output, _ = self.self_attn(
 783 |               hidden_states,
 784 |               attention_mask=attention_mask,
 785 | +             cu_seqlens=cu_seqlens,
 786 | +             max_seqlen=max_seqlen,
 787 |               **kwargs,
 788 |           )
 789 | +         hidden_states = residual + attn_output
 790 | +
 791 | +         residual = hidden_states
 792 | +         hidden_states = self.layer_norm2(hidden_states)
 793 | +         hidden_states = self.mlp(hidden_states)
 794 | +         hidden_states = residual + hidden_states
 795 | +
 796 | +         return hidden_states
 797 |
 798 |
 799 |   class IsaacVisionEncoder(Siglip2Encoder):

 803 |           super().__init__(config)
 804 |           self.layers = nn.ModuleList([IsaacVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
 805 |
 806 |       @can_return_tuple
 807 | +     @check_model_inputs
 808 |       def forward(
 809 |           self,
 810 |           inputs_embeds,
 811 |           attention_mask: Optional[torch.Tensor] = None,
 812 |           **kwargs: Unpack[TransformersKwargs],
 813 |       ):
 814 | +         hidden_states = inputs_embeds
 815 | +         for encoder_layer in self.layers:
 816 | +             hidden_states = encoder_layer(
 817 | +                 hidden_states,
 818 | +                 attention_mask,
 819 | +                 **kwargs,
 820 | +             )
 821 | +         return BaseModelOutput(last_hidden_state=hidden_states)
 822 |
 823 |
 824 |   def create_pixel_shuffle_index_map(

 914 |       Raises:
 915 |           ValueError: If more than one batch item is provided.
 916 |       """
 917 | +     return_with_batch_dim = x.dim() == 3
 918 | +     if return_with_batch_dim:
 919 |           if x.size(0) != 1:
 920 |               raise AssertionError("Packed sequence is expected to have batch_size == 1")
 921 | +         embeddings = x.squeeze(0)  # (seq, embed)
 922 |       else:
 923 | +         embeddings = x  # (seq, embed)
 924 |
 925 | +     embed_dim = embeddings.size(-1)
 926 |       scale_factor = int(scale_factor)
 927 |
 928 |       # Calculate seq_sizes from token_grids

 933 |           seq_sizes=seq_sizes,
 934 |           token_grids=token_grids,
 935 |           scale_factor=scale_factor,
 936 | +         device=embeddings.device,
 937 |       )  # (new_seq, scale_factor**2)
 938 |
 939 |       # Gather → (new_seq, scale_factor**2, embed_dim)
 940 | +     gathered = embeddings[gather_idx]  # fancy indexing keeps gradient
 941 |
 942 |       # Merge the scale_factor**2 group dimension into channels to finish the shuffle
 943 |       out = gathered.reshape(gathered.size(0), embed_dim * scale_factor * scale_factor)
 944 |
 945 |       # Restore batch dimension if needed
 946 | +     if return_with_batch_dim:
 947 |           out = out.unsqueeze(0)
 948 |       return out
 949 |
 950 |
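pixel_shuffle_varlen trades spatial tokens for channels: every scale_factor**2 neighbouring patch tokens are folded into one token whose channel width grows by the same factor. A dense, non-packed toy equivalent with assumed shapes, just to show the bookkeeping:

import torch

tokens = torch.randn(1, 4, 4, 64)   # (batch, height, width, channels) grid of vision tokens
s = 2                                # pixel_shuffle_scale_factor
b, h, w, c = tokens.shape
out = tokens.reshape(b, h // s, s, w // s, s, c)
out = out.permute(0, 1, 3, 2, 4, 5).reshape(b, (h // s) * (w // s), c * s * s)
print(out.shape)  # torch.Size([1, 4, 256]) -- 16 tokens become 4, each 4x wider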
 951 |   class IsaacVisionTransformer(nn.Module):
 952 | +     _supports_sdpa = True
 953 | +
 954 |       def __init__(self, config: IsaacVisionConfig):
 955 |           super().__init__()
 956 |           self.config = config

 972 |           # Generate cumulative sequence lengths for variable-length attention
 973 |           cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32, device=hidden_states.device)
 974 |           cu_seqlens[1:] = seq_sizes.cumsum(0)
 975 | +
 976 | +         attention_mask = create_document_attention_mask(self.config, hidden_states, cu_seqlens)
 977 |
 978 |           # Pass through encoder with variable-length attention parameters
 979 |           encoder_outputs = self.encoder(
 980 |               inputs_embeds=hidden_states,
 981 | +             attention_mask=attention_mask,
 982 |               cu_seqlens=cu_seqlens,
 983 |           )
 984 |           hidden_states = encoder_outputs.last_hidden_state
 985 |

 998 |           return hidden_states
 999 |
1000 |
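cu_seqlens here is the usual FlashAttention-style boundary vector: a leading zero followed by the running sum of per-image token counts. A small worked example using the same construction (values are illustrative):

import torch

seq_sizes = torch.tensor([196, 49, 256])  # tokens contributed by each packed image
cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32)
cu_seqlens[1:] = seq_sizes.cumsum(0)
print(cu_seqlens)  # tensor([  0, 196, 245, 501], dtype=torch.int32)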
1001 | + class IsaacMultiModalProjector(nn.Module):
1002 | +     def __init__(self, config: IsaacConfig):
1003 | +         super().__init__()
1004 | +         self.vision_hidden_size = config.vision_config.hidden_size * (
1005 | +             config.vision_config.pixel_shuffle_scale_factor**2
1006 | +         )
1007 | +         self.backbone_hidden_size = config.hidden_size
1008 | +         self.linear_1 = nn.Linear(self.vision_hidden_size, 4 * self.vision_hidden_size, bias=False)
1009 | +         self.silu = nn.SiLU()
1010 | +         self.linear_2 = nn.Linear(4 * self.vision_hidden_size, self.backbone_hidden_size, bias=False)
1011 | +
1012 | +     def forward(self, image_features):
1013 | +         hidden_states = self.linear_1(image_features)
1014 | +         hidden_states = self.silu(hidden_states)
1015 | +         hidden_states = self.linear_2(hidden_states)
1016 | +         return hidden_states
1017 | +
1018 | +
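Shape-wise, the projector maps pixel-shuffled vision features to the text model's hidden size through a 4x-wide SiLU MLP. An equivalent standalone sketch with assumed sizes (1152-dim SigLIP features, scale factor 2, 2048-dim text hidden size):

import torch
import torch.nn as nn

vision_hidden = 1152 * (2 ** 2)   # vision hidden_size * pixel_shuffle_scale_factor**2
text_hidden = 2048
projector = nn.Sequential(
    nn.Linear(vision_hidden, 4 * vision_hidden, bias=False),
    nn.SiLU(),
    nn.Linear(4 * vision_hidden, text_hidden, bias=False),
)
image_features = torch.randn(64, vision_hidden)   # 64 pixel-shuffled vision tokens
print(projector(image_features).shape)            # torch.Size([64, 2048])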
1019 |   class IsaacVisionEmbedding(nn.Module):
1020 |       """Vision embedding wrapper exposing tower and projector."""
1021 |
1022 | +     _supports_sdpa = True
1023 | +
1024 |       def __init__(self, config: IsaacConfig):
1025 |           super().__init__()
1026 |           vision_cfg = config.vision_config
1027 |
1028 |           self.vision_tower = IsaacVisionTransformer(vision_cfg)
1029 | +         self.multimodal_projector = IsaacMultiModalProjector(config)
1030 |
1031 |       def forward(self, vision_tokens: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
1032 |           hidden_states = self.vision_tower(vision_tokens)

1123 |       return target_height, target_width
1124 |
1125 |
1126 |   class IsaacConfig(PretrainedConfig):
1127 |       """Configuration class for Isaac multimodal model.
1128 |

1143 |           vision_token: str = "<image>",
1144 |           **kwargs,
1145 |       ):
1146 | +         attn_implementation = kwargs.get("attn_implementation")
1147 |
1148 | +         if isinstance(text_config, dict):
1149 | +             self.text_config = self.sub_configs["text_config"](**text_config)
1150 | +         elif isinstance(text_config, Qwen3Config):
1151 | +             self.text_config = text_config
1152 | +         elif text_config is None:
1153 | +             self.text_config = self.sub_configs["text_config"]()
1154 |
1155 | +         # Seed RoPE parameters before base init so the shared mixin can standardize/validate them.
1156 | +         self.rope_parameters = getattr(self.text_config, "rope_parameters", None)
1157 | +         self.layer_types = getattr(self.text_config, "layer_types", None)
1158 |
1159 |           super().__init__(**kwargs)
1160 |
1161 | +         # Keep rope parameters aligned between the composite and text sub-configs.
1162 | +         self.text_config.rope_parameters = self.rope_parameters
1163 |
1164 | +         # Mirror frequently accessed Qwen3 attributes at the composite config level
1165 |           self.vocab_size = self.text_config.vocab_size
1166 |           self.hidden_size = self.text_config.hidden_size
1167 |           self.num_hidden_layers = self.text_config.num_hidden_layers

1169 |           self.head_dim = self.text_config.head_dim
1170 |           self.hidden_act = self.text_config.hidden_act
1171 |           self.use_cache = self.text_config.use_cache
1172 | +         self.rope_theta = self.rope_parameters["rope_theta"]
1173 |
1174 |           self.layer_types = getattr(self.text_config, "layer_types", None)
1175 |           layer_type_validation(self.layer_types, self.num_hidden_layers)

1182 |           elif vision_config is None:
1183 |               self.vision_config = self.sub_configs["vision_config"]()
1184 |
1185 | +         # Propagate user-requested attention backend to the vision sub-config when provided.
1186 | +         if attn_implementation is not None:
1187 | +             if isinstance(attn_implementation, dict):
1188 | +                 vision_attn = attn_implementation.get("vision_config", attn_implementation.get("", None))
1189 | +             else:
1190 | +                 vision_attn = attn_implementation
1191 | +             if vision_attn is not None:
1192 | +                 self.vision_config._attn_implementation = vision_attn
1193 | +
1194 |           # Vision normalization parameters
1195 |           self.vision_rescale_factor = float(vision_rescale_factor)

1198 |           self.max_sequence_length = max_sequence_length
1199 |           self.vision_token = vision_token
1200 |
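Given the propagation above, the attention backend can be requested when building the config, either as a single string or as a dict keyed by sub-config name (the "" key acts as the default, exactly as it is read above). A hedged usage sketch; constructor defaults beyond what this diff shows are assumed:

cfg = IsaacConfig(attn_implementation="sdpa")
# cfg.vision_config._attn_implementation is now "sdpa" as well.

cfg = IsaacConfig(attn_implementation={"vision_config": "sdpa", "": "flash_attention_2"})
# The vision tower uses SDPA while the rest of the model keeps flash_attention_2.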
| 1201 |
def to_dict(self):
|
| 1202 |
output = super().to_dict()
|
| 1203 |
# Ensure nested configs round-trip through dict serialization
|
|
|
|
1259   class IsaacProcessor(ProcessorMixin):
1260       attributes = ["image_processor", "tokenizer"]
1261       image_processor_class = ("IsaacImageProcessorFast",)
1262 +     tokenizer_class = ("Qwen2Tokenizer",)
1263
1264       def __init__(
1265           self,

1302       def build_event_stream_simple(
1303           self,
1304           text: str,
1305 +         images: Optional[list[Image]] = None,
1306       ) -> Stream:
1307           events = []
1308           # Process text and images

1348       def __call__(
1349           self,
1350           text: Union[str, list[str]],
1351 +         images: Optional[Union[Image, list[Image]]] = None,
1352           return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
1353           **kwargs,
1354       ) -> BatchFeature:

1369           texts = text
1370
1371           if images is not None:
1372 +             if isinstance(images, Image):
1373                   images_list = [images]
1374               else:
1375                   images_list = images
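
A hedged usage sketch of the __call__ signature above; the repository id is a placeholder, and trust_remote_code=True is assumed because IsaacProcessor is loaded from modular_isaac.py.

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("<repo-id>", trust_remote_code=True)  # "<repo-id>" is a placeholder
image = Image.new("RGB", (256, 256))  # stand-in image for illustration
batch = processor(text="Describe this image.", images=image, return_tensors="pt")
print(list(batch.keys()))
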
1439           return position_ids
1440
1441
1442 + class IsaacRotaryEmbedding(qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding):
1443       EXTRA_ROPE_KEYS = {"mrope_section", "mrope_interleaved"}
1444
1445       def __init__(self, config: IsaacConfig, device=None):
1446           rope_source_cfg = config.get_text_config() if hasattr(config, "get_text_config") else config
1447           rope_scaling = getattr(rope_source_cfg, "rope_scaling", None) or {}

1451           config_for_rope.rope_scaling = sanitized_scaling if sanitized_scaling else None
1452
1453           init_device = device if device is not None and getattr(device, "type", None) != "meta" else None
1454 +         super().__init__(config_for_rope, device=init_device)
1455
1456 +         rotary_half_dim = self.inv_freq.shape[0]
1457           self.mrope_section = self._resolve_mrope_section(rope_scaling.get("mrope_section"), rotary_half_dim)
1458           self.hidden_size = getattr(rope_source_cfg, "hidden_size", None) or config.hidden_size
1459

1479           chunks = tensor.split(split_sections, dim=-1)
1480           return torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
1481

1482       def forward(
1483           self,
1484           position_ids: torch.Tensor,

1502
1503           with torch.no_grad():
1504               pos = position_ids.clone()
1505 +             image_value = VisionType.image.value if VisionType is not None else 1
1506 +             not_spatial = modality_tensor != image_value
1507               if not_spatial.any():
1508                   data_1d = pos[not_spatial][..., 0].unsqueeze(-1)
1509                   pos[not_spatial] = data_1d.expand(-1, pos.shape[-1])
1510
1511           pos_axes = pos.permute(2, 0, 1).contiguous()
1512
1513 +         cos_axes, sin_axes = super().forward(hidden_states, pos_axes)
1514
1515           cos_axes = cos_axes.to(hidden_states.dtype)
1516           sin_axes = sin_axes.to(hidden_states.dtype)
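
A self-contained sketch, with made-up shapes and an assumed mrope_section, of the interleaved recombination on lines 1479-1480: the last dimension is split by mrope_section and the chunks are assigned round-robin to the temporal/height/width axes.

import torch

def interleave_mrope(freqs: torch.Tensor, split_sections: list[int]) -> torch.Tensor:
    # freqs: (3, batch, seq, rotary_dim); chunks 0, 3, 6, ... are taken from axis 0, chunks 1, 4, ... from axis 1, etc.
    chunks = freqs.split(split_sections, dim=-1)
    return torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)

freqs = torch.randn(3, 2, 5, 16)           # (axes, batch, seq, rotary_dim), arbitrary sizes
out = interleave_mrope(freqs, [6, 6, 4])   # assumed mrope_section summing to rotary_dim
print(out.shape)                           # torch.Size([2, 5, 16])
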
1523
1524   class IsaacModel(Qwen3PreTrainedModel):
1525       supports_gradient_checkpointing = True
1526 +     _can_compile_fullgraph = False
1527 +     _supports_flex_attn = False
1528 +     _can_record_outputs = {"attentions": OutputRecorder(IsaacVisionAttention, index=1)}
1529 +     # Expose tied-weights mapping even if empty for base model tests.
1530 +     all_tied_weights_keys: dict[str, str] = {}
1531
1532       def __init__(self, config: IsaacConfig):
1533           Qwen3PreTrainedModel.__init__(self, config)

1544               raise ValueError("IsaacConfig should always have vision_config")
1545
1546           self.vision_embedding = IsaacVisionEmbedding(config)
1547 +         self.vision_embedding._supports_sdpa = True
1548
1549           # Dispatch table for TensorStream balanced embedding (text + vision)
1550           self.embed_fns = {

1557           self.vision_rescale_factor = config.vision_rescale_factor
1558           self.vision_token = config.vision_token
1559
1560 +         # Initialize weights and parallel plans (including tp_plan from the text model)
1561 +         self.post_init()
1562 +
1563 +         # Respect config-specified gradient checkpointing
1564 +         if getattr(config, "gradient_checkpointing", False):
1565 +             self.gradient_checkpointing_enable()
1566 +
1567       def get_input_embeddings(self) -> nn.Module:
1568           return self.text_model.get_input_embeddings()
1569
1570       def set_input_embeddings(self, value: nn.Module) -> None:
1571           self.text_model.set_input_embeddings(value)
1572 +         vocab_size = getattr(value, "num_embeddings", None)
1573 +         if vocab_size is not None:
1574 +             self.config.vocab_size = vocab_size
1575 +             if hasattr(self.config, "text_config"):
1576 +                 self.config.text_config.vocab_size = vocab_size
1577 +             self.text_model.config.vocab_size = vocab_size
1578
1579       @property
1580       def embed_tokens(self) -> nn.Module:

1585           self.text_model.embed_tokens = value
1586
1587       @property
1588 +     def vision_model(self) -> nn.Module:
1589 +         return self.vision_embedding.vision_tower
1590

1595 +     @property
1596 +     def vision_tower(self) -> nn.Module:
1597 +         return self.vision_embedding.vision_tower
1598
1599       def embed_text_tokens(self, token_ids: torch.Tensor) -> torch.Tensor:
1600           """Embed text tokens, squeezing singleton dimensions."""
1643           h = embedded_ts.compact()  # (B, T, D)
1644           return h
1645
1646 +     @staticmethod
1647 +     def compute_position_ids_input_ids(input_ids: torch.Tensor) -> torch.Tensor:
1648 +         return compute_position_ids_input_ids(input_ids)
1649 +
1650 +     def _prepare_position_and_modality(
1651 +         self,
1652 +         position_ids: Optional[torch.LongTensor],
1653 +         modality_tensor: Optional[torch.LongTensor],
1654 +         tensor_stream: Optional[TensorStream],
1655 +         inputs_embeds: torch.Tensor,
1656 +         cache_position: torch.LongTensor,
1657 +     ) -> tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor, torch.Tensor, torch.Tensor]:
1658 +         text_value = TextType.text.value if TextType is not None else 0
1659 +         batch_size, seq_len = inputs_embeds.shape[:2]
1660 +
1661 +         if modality_tensor is None:
1662 +             if tensor_stream is not None:
1663 +                 modality_tensor = modality_mask(tensor_stream)
1664 +             else:
1665 +                 modality_tensor = torch.full(
1666 +                     (batch_size, seq_len), text_value, device=inputs_embeds.device, dtype=torch.long
1667 +                 )
1668 +         else:
1669 +             modality_tensor = modality_tensor.to(device=inputs_embeds.device, dtype=torch.long)
1670 +             expected_shape = (batch_size, seq_len)
1671 +             if modality_tensor.shape != torch.Size(expected_shape):
1672 +                 raise ValueError(
1673 +                     f"modality_tensor must have shape (batch_size, seq_len) {expected_shape}, "
1674 +                     f"but got {tuple(modality_tensor.shape)}"
1675 +                 )
1676 +
1677 +         if position_ids is None:
1678 +             if tensor_stream is not None:
1679 +                 position_ids = compute_mrope_pos_tensor(tensor_stream)  # (B,L,3)
1680 +             else:
1681 +                 position_ids = cache_position.view(1, -1).expand(modality_tensor.shape[0], -1)
1682 +
1683 +         if position_ids.ndim == 2:
1684 +             position_ids = position_ids.to(device=inputs_embeds.device)
1685 +             position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)
1686 +
1687 +         if position_ids.shape[1] != seq_len:
1688 +             start_positions = position_ids[:, :1, 0]
1689 +             position_ids = torch.arange(seq_len, device=inputs_embeds.device).view(1, -1)
1690 +             position_ids = position_ids + start_positions
1691 +             position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)
1692 +
1693 +         cos, sin = self.rotary_emb(
1694 +             position_ids,
1695 +             modality_tensor,
1696 +             hidden_states=inputs_embeds,
1697 +         )
1698 +
1699 +         decoder_position_ids = position_ids[..., 0] if position_ids.ndim == 3 else position_ids
1700 +         return position_ids, modality_tensor, decoder_position_ids, cos, sin
1701 +
1702       @auto_docstring
1703       @check_model_inputs
1704       def forward(

1711           past_key_values: Optional[list[torch.FloatTensor]] = None,
1712           inputs_embeds: Optional[torch.FloatTensor] = None,
1713           use_cache: Optional[bool] = None,
1714           cache_position: Optional[torch.LongTensor] = None,
1715 +         **kwargs: Unpack[TransformersKwargs],
1716       ) -> tuple | BaseModelOutputWithPast:
1717           """
1718           Forward pass with MRoPE position embeddings.

1730           omitted.
1731           """
1732
1733 +         output_attentions = kwargs.pop("output_attentions", None)
1734 +
1735           # Get inputs
1736           if tensor_stream is not None and inputs_embeds is not None:
1737               raise ValueError("You cannot specify both tensor_stream and inputs_embeds")
1738 +         if tensor_stream is None and input_ids is not None and inputs_embeds is not None:
1739               raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1740 +
1741 +         # Resolve the input source (TensorStream takes precedence over token ids).
1742 +         if tensor_stream is not None:
1743 +             inputs_embeds = self.embed_stream(tensor_stream)
1744           elif input_ids is not None:
1745               inputs_embeds = self.text_model.embed_tokens(input_ids)
1746           elif inputs_embeds is None:
1747               raise ValueError("You have to specify either tensor_stream, input_ids or inputs_embeds")
1748
1749 +         batch_size, seq_len = inputs_embeds.shape[:2]
1750 +
1751           # Ensure cache exists when requested
1752           if use_cache and past_key_values is None:
1753               cache_config = self.config.get_text_config() if hasattr(self.config, "get_text_config") else self.config
1754               past_key_values = DynamicCache(config=cache_config)
1755
1756 +         if cache_position is None:
1757               past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1758 +             cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_len, device=inputs_embeds.device)
1759
1760 +         if attention_mask is None:
1761 +             attention_mask = torch.ones((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long)
1762
1763 +         position_ids, modality_tensor, decoder_position_ids, cos, sin = self._prepare_position_and_modality(
1764 +             position_ids=position_ids,
1765 +             modality_tensor=modality_tensor,
1766 +             tensor_stream=tensor_stream,
1767 +             inputs_embeds=inputs_embeds,
1768 +             cache_position=cache_position,
1769           )
1770
1771           # Prepare attention mask
1772           if not isinstance(attention_mask, dict):
1773 +             attention_mask = create_masks_for_generate(
1774 +                 config=self.config,
1775 +                 input_embeds=inputs_embeds,
1776 +                 attention_mask=attention_mask,
1777 +                 cache_position=cache_position,
1778 +                 past_key_values=past_key_values,
1779 +                 position_ids=decoder_position_ids,
1780 +             )
1781 +
1782 +         is_attention_mask_dict = isinstance(attention_mask, dict)
1783
1784           # Initialize hidden states
1785           hidden_states = inputs_embeds
1786 +         all_attentions = [] if output_attentions else None
1787
1788           for decoder_layer in self.text_model.layers:
1789               layer_attention_mask = (
1790 +                 attention_mask[decoder_layer.attention_type] if is_attention_mask_dict else attention_mask
1791               )
1792               layer_outputs = decoder_layer(
1793                   hidden_states,
1794                   attention_mask=layer_attention_mask,
1795 +                 position_ids=decoder_position_ids,
1796                   past_key_values=past_key_values,
1797                   use_cache=use_cache,
1798                   cache_position=cache_position,
1799                   position_embeddings=(cos, sin),
1800 +                 output_attentions=output_attentions,
1801                   **kwargs,
1802               )
1803
1804 +             layer_outputs_is_tuple = isinstance(layer_outputs, tuple)
1805 +             hidden_states = layer_outputs[0] if layer_outputs_is_tuple else layer_outputs
1806 +             if output_attentions and layer_outputs_is_tuple:
1807 +                 all_attentions.append(layer_outputs[1])
1808
1809           # Final layer norm
1810           hidden_states = self.text_model.norm(hidden_states)

1812           return BaseModelOutputWithPast(
1813               last_hidden_state=hidden_states,
1814               past_key_values=past_key_values,
1815 +             hidden_states=(hidden_states,),
1816 +             attentions=tuple(all_attentions) if output_attentions else None,
1817           )
1818
1819
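
A rough, self-contained illustration of the text-only fallback in _prepare_position_and_modality above: 1D cache positions are broadcast to three planes so text-only batches share the same (B, L, 3) MRoPE layout as multimodal ones. The sizes are arbitrary.

import torch

batch_size, seq_len, past_seen_tokens = 2, 4, 10
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_len)   # (L,)
position_ids = cache_position.view(1, -1).expand(batch_size, -1)             # (B, L)
position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3)                  # (B, L, 3)
print(position_ids[0, :, 0])  # tensor([10, 11, 12, 13]); identical across all three planes for text
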
1821       """Isaac multimodal model for conditional generation."""
1822
1823       config_class = IsaacConfig
1824 +     _can_compile_fullgraph = False
1825 +     _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"}
1826 +     all_tied_weights_keys: dict[str, str] = {"lm_head.weight": "model.text_model.embed_tokens.weight"}
1827
1828       def __init__(self, config: IsaacConfig):
1829           super().__init__(config)

1833           # Tracks rotary position offsets computed during a full forward pass so decode steps can reuse them.
1834           self.rope_deltas = None
1835

1836       def forward(
1837           self,
1838           input_ids: Optional[torch.LongTensor] = None,

1843           inputs_embeds: Optional[torch.FloatTensor] = None,
1844           labels: Optional[torch.LongTensor] = None,
1845           use_cache: Optional[bool] = None,
1846           cache_position: Optional[torch.LongTensor] = None,
1847 +         **kwargs: Unpack[TransformersKwargs],
1848       ) -> tuple | CausalLMOutputWithPast:
1849           r"""
1850           Forward pass for conditional generation supporting both standard inputs and TensorStream.

1855               `input_ids`.
1856           """
1857
1858 +         output_attentions = kwargs.pop("output_attentions", None)
1859 +
1860 +         # Don't compute embeddings here - let the inner model handle it
1861           if tensor_stream is not None:
1862               input_ids = None
1863           if input_ids is None and inputs_embeds is None and tensor_stream is None:
1864               raise ValueError("Either input_ids, inputs_embeds, or tensor_stream must be provided.")
1865
1866 +         # Record rope deltas on prefill when TensorStream is provided; leave position_ids building to IsaacModel.
1867           if position_ids is None and tensor_stream is not None:
1868               position_ids, self.rope_deltas = self.get_rope_index(input_ids, tensor_stream, attention_mask)
1869 +         elif position_ids is None and cache_position is not None and self.rope_deltas is not None:
1870 +             # Decode continuation after TensorStream prefill: advance positions using cached rope offsets.
1871 +             if input_ids is not None:
1872 +                 base_position_ids = compute_position_ids_input_ids(input_ids)
1873               else:
1874 +                 if inputs_embeds is None:
1875 +                     raise ValueError("inputs_embeds must be provided when input_ids is None during decode")
1876 +                 batch_size, seq_len = inputs_embeds.shape[:2]
1877 +                 dummy_ids = torch.zeros((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long)
1878 +                 base_position_ids = compute_position_ids_input_ids(dummy_ids)
1879
1880 +             rope_delta = (cache_position[0] + self.rope_deltas).to(base_position_ids.device)
1881 +             if not isinstance(rope_delta, int):
1882 +                 rope_delta = rope_delta.repeat_interleave(base_position_ids.shape[0] // rope_delta.shape[0], dim=0)
1883 +             position_ids = base_position_ids.add(rope_delta)
1884
1885           outputs = self.model(
1886               input_ids=input_ids,
1887               tensor_stream=tensor_stream,
1888               attention_mask=attention_mask,
1889               position_ids=position_ids,
1890 +             modality_tensor=None,
1891               past_key_values=past_key_values,
1892               inputs_embeds=inputs_embeds,
1893               use_cache=use_cache,
1894 +             output_attentions=output_attentions,
1895               cache_position=cache_position,
1896               **kwargs,
1897           )

1908               logits=logits,
1909               past_key_values=outputs.past_key_values,
1910               hidden_states=outputs.hidden_states,
1911 +             attentions=outputs.attentions if output_attentions else None,
1912           )
1913
1914 +     def set_input_embeddings(self, value: nn.Module) -> None:
1915 +         self.model.set_input_embeddings(value)
1916 +         vocab_size = getattr(value, "num_embeddings", None)
1917 +         if vocab_size is not None:
1918 +             self.config.vocab_size = vocab_size
1919 +             self.model.config.vocab_size = vocab_size
1920 +             if hasattr(self.model, "text_model"):
1921 +                 self.model.text_model.config.vocab_size = vocab_size
1922 +             if self.lm_head.weight.shape[0] != vocab_size:
1923 +                 self.lm_head = nn.Linear(self.config.hidden_size, vocab_size, bias=False)
1924 +                 if hasattr(self.model, "embed_tokens"):
1925 +                     self.lm_head.weight = self.model.text_model.embed_tokens.weight
1926 +
1927 +     def get_rope_index(
1928 +         self,
1929 +         input_ids: Optional[torch.Tensor],
1930 +         tensor_stream: Optional[TensorStream],
1931 +         attention_mask: Optional[torch.Tensor],
1932 +     ) -> tuple[torch.Tensor, torch.Tensor]:
1933 +         """Compute MRoPE position ids from a TensorStream (or 1D fallback).
1934 +
1935 +         Returns (position_ids, rope_deltas). position_ids is (B,L,3) for MRoPE.
1936 +         rope_deltas is (B,1) used to advance positions in decode.
1937 +         """
1938 +         # tensor_stream present: compute 3D coords
1939 +         if tensor_stream is None and input_ids is None:
1940 +             raise ValueError("`tensor_stream` or `input_ids` must be provided to compute rope indices")
1941 +
1942 +         if tensor_stream is not None:
1943 +             pos_3d = compute_mrope_pos_tensor(tensor_stream)  # (B,L,3)
1944 +         else:
1945 +             pos_3d = compute_position_ids_input_ids(input_ids)
1946 +         B, L, _ = pos_3d.shape
1947 +
1948 +         # Max position per batch across the 3 planes and sequence dimension: (B,)
1949 +         m_per_batch = pos_3d.amax(dim=(1, 2))
1950 +
1951 +         # Sequence lengths per batch: (B,)
1952 +         if attention_mask is None:
1953 +             seq_lens = torch.full_like(m_per_batch, L)
1954 +         else:
1955 +             seq_lens = attention_mask.eq(1).sum(dim=-1).to(dtype=m_per_batch.dtype, device=m_per_batch.device)
1956 +
1957 +         rope_deltas = (m_per_batch + 1 - seq_lens).to(dtype=pos_3d.dtype).unsqueeze(1)
1958 +         return pos_3d, rope_deltas
1959 +
1960       def prepare_inputs_for_generation(
1961           self,
1962           input_ids: torch.LongTensor,

2003
2004           cache_position = model_inputs.get("cache_position", cache_position)
2005
2006 +         # Handle TensorStream only for the prefill step
2007 +         first_step = cache_position is None or cache_position[0] == 0
2008 +         if tensor_stream is not None and first_step:
2009               model_inputs["tensor_stream"] = tensor_stream
2010 +             # Let forward rebuild MRoPE coordinates from the TensorStream
2011 +             model_inputs["position_ids"] = None
2012 +         else:
2013               model_inputs["tensor_stream"] = None
2014
2015 +             # TensorStream decode path: preserve rotary offsets from prefill; let forward rebuild positions
2016 +             if tensor_stream is not None and not first_step and self.rope_deltas is not None:
2017 +                 model_inputs["position_ids"] = None
2018 +             return model_inputs
2019
2020 +         return model_inputs
2021
2022 +     @classmethod
2023 +     def can_generate(cls) -> bool:
2024 +         return True

2025
2026
2027   def _compute_residual_p_frames(frames: torch.Tensor, is_p_frame: list[bool]) -> torch.Tensor:
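
A small numerical sketch, with invented coordinates, of the rope_deltas bookkeeping in get_rope_index above: rope_deltas = max_position + 1 - seq_len, and a decode step offsets new positions by cache_position[0] + rope_deltas.

import torch

pos_3d = torch.tensor([[[0, 0, 0], [1, 0, 1], [2, 1, 0], [3, 1, 1]]])  # (B=1, L=4, 3) made-up MRoPE coordinates
m_per_batch = pos_3d.amax(dim=(1, 2))                                  # tensor([3])
seq_lens = torch.tensor([4])
rope_deltas = (m_per_batch + 1 - seq_lens).unsqueeze(1)                # tensor([[0]])

cache_position = torch.tensor([4])                 # first decode step after 4 cached tokens
next_position = cache_position[0] + rope_deltas    # tensor([[4]]) -> position of the newly generated token
print(next_position)
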
processor_config.json
CHANGED

@@ -7,9 +7,7 @@
  7       "auto_map": {
  8         "AutoProcessor": "modular_isaac.IsaacProcessor"
  9       },
 10 -     "crop_size": null,
 11       "data_format": "channels_first",
 12 -     "device": null,
 13       "disable_grouping": false,
 14       "do_center_crop": false,
 15       "do_convert_rgb": true,

@@ -23,23 +21,17 @@
 23         0.5
 24       ],
 25       "image_processor_type": "IsaacImageProcessorFast",
 26 -     "image_seq_length": null,
 27       "image_std": [
 28         0.5,
 29         0.5,
 30         0.5
 31       ],
 32 -     "input_data_format": null,
 33       "max_num_patches": 6144,
 34       "min_num_patches": 256,
 35 -     "pad_size": null,
 36       "patch_size": 16,
 37       "pixel_shuffle_scale": 2,
 38 -     "processor_class": "IsaacProcessor",
 39       "resample": 2,
 40 -     "rescale_factor": 0.00392156862745098
 41 -     "return_tensors": null,
 42 -     "size": null
 34 +     "rescale_factor": 0.00392156862745098
 43       },
 44       "max_sequence_length": 16384,
 45       "processor_class": "IsaacProcessor",
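
The rescale_factor kept above equals 1/255. A short standalone sketch of how a uint8 pixel value would be rescaled and then normalized with the listed image_mean/image_std of 0.5; the array values are arbitrary.

import numpy as np

rescale_factor = 0.00392156862745098                 # == 1 / 255
pixels = np.array([0, 128, 255], dtype=np.uint8)
scaled = pixels.astype(np.float32) * rescale_factor  # maps [0, 255] -> [0, 1]
normalized = (scaled - 0.5) / 0.5                    # maps [0, 1] -> [-1, 1]
print(normalized)                                    # approximately [-1.0, 0.0039, 1.0]
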
special_tokens_map.json
DELETED

@@ -1,31 +0,0 @@
  1 - {
  2 -   "additional_special_tokens": [
  3 -     "<|im_start|>",
  4 -     "<|im_end|>",
  5 -     "<|object_ref_start|>",
  6 -     "<|object_ref_end|>",
  7 -     "<|box_start|>",
  8 -     "<|box_end|>",
  9 -     "<|quad_start|>",
 10 -     "<|quad_end|>",
 11 -     "<|vision_start|>",
 12 -     "<|vision_end|>",
 13 -     "<|vision_pad|>",
 14 -     "<|image_pad|>",
 15 -     "<|video_pad|>"
 16 -   ],
 17 -   "eos_token": {
 18 -     "content": "<|im_end|>",
 19 -     "lstrip": false,
 20 -     "normalized": false,
 21 -     "rstrip": false,
 22 -     "single_word": false
 23 -   },
 24 -   "pad_token": {
 25 -     "content": "<|endoftext|>",
 26 -     "lstrip": false,
 27 -     "normalized": false,
 28 -     "rstrip": false,
 29 -     "single_word": false
 30 -   }
 31 - }
tokenizer.json
CHANGED

@@ -1,3 +1,3 @@
  1   version https://git-lfs.github.com/spec/v1
  2 - oid sha256:
  3 - size
  2 + oid sha256:b6a069d8afc5e4604a1d15db8b4678d9a804bda3991fe2822cf350ec571084f2
  3 + size 11473537
tokenizer_config.json
CHANGED

@@ -1,7 +1,5 @@
  1   {
  2 -   "add_bos_token": false,
  3     "add_prefix_space": false,
  4 -   "additional_special_tokens": null,
  5     "auto_map": {
  6       "AutoProcessor": "modular_isaac.IsaacProcessor"
  7     },
vocab.json
DELETED

The diff for this file is too large to render. See raw diff.