thisisiron committed
Commit 6854465 · verified · Parent: 3666803

Upload Ovis2ForConditionalGeneration

model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fe45868f9e8340da3df20dfc41839ac5ce1b4301ce6c49b9ff189853e7b3512e
- size 4999702488
+ oid sha256:bfc7f6b32d60b91e83cbefe96970855d0b7371297fd118c162a3a3bcb0736eb5
+ size 4999702392
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:73d19f486b1d9abade4f715a3a67a392577b565d97b1d3d1e0fc02c4428082ec
- size 804576872
+ oid sha256:c84e4fdf6e06ad30bb3a4a64186a30ef0b6b6938782b29a0288b30c3752d4cd1
+ size 804576880
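
Both shards are tracked with Git LFS, so the diff above only changes their pointer files (spec v1: an `oid sha256` plus a `size` in bytes). A minimal sketch for checking a downloaded shard against its pointer; the file names are the ones from this commit, while the helper itself is illustrative and not part of the repository:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against a git-lfs spec v1 pointer file (illustrative helper)."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    # Hash the blob in chunks so multi-GB shards never need to fit in memory.
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    return (digest.hexdigest() == expected_oid
            and os.path.getsize(blob_path) == expected_size)

# e.g. verify_lfs_pointer("model-00001-of-00002.safetensors",   # pointer as stored in git
#                         "model-00001-of-00002.safetensors")   # resolved blob after `git lfs pull`
```
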
model.safetensors.index.json CHANGED
@@ -349,223 +349,223 @@
  "model.vision_tower.transformer.embeddings.patch_embedding.weight": "model-00001-of-00002.safetensors",
  "model.vision_tower.transformer.embeddings.position_embedding.weight": "model-00001-of-00002.safetensors",
  "model.vision_tower.transformer.embeddings.rms_norm.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.layer_norm1.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.layer_norm2.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
- "model.vision_tower.transformer.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.0.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.1.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.10.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.11.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.12.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.13.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.14.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.15.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.16.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.17.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.18.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.19.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.2.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.20.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.21.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.22.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.23.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.3.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.4.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.5.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.6.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.7.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.8.rms_norm2.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.ffn.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.ffn.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.ffn.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.rms_norm1.weight": "model-00001-of-00002.safetensors",
+ "model.vision_tower.transformer.encoder.layers.9.rms_norm2.weight": "model-00001-of-00002.safetensors",
  "model.vision_tower.transformer.rms_norm.weight": "model-00001-of-00002.safetensors",
- "model.visual_table.weight": "model-00002-of-00002.safetensors"
+ "model.visual_embeddings_table.weight": "model-00002-of-00002.safetensors"
  }
  }
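
The index change is a pure rename of the vision tower keys: `layer_norm1/2` become `rms_norm1/2`, `mlp.*` becomes `ffn.*`, `self_attn.*` becomes `attention.*`, and `model.visual_table.weight` becomes `model.visual_embeddings_table.weight`; every entry still points at the same shard. A hedged sketch of how an older state dict could be remapped to this naming follows; the rename rules are read off the diff above, but the helper itself is illustrative and not part of the repository:

```python
import re

# Rename rules visible in the diff above (old pattern -> new replacement).
RENAME_RULES = [
    (r"\.layer_norm1\.", ".rms_norm1."),
    (r"\.layer_norm2\.", ".rms_norm2."),
    (r"\.mlp\.", ".ffn."),
    (r"\.self_attn\.", ".attention."),
    (r"^model\.visual_table\.", "model.visual_embeddings_table."),
]

def remap_old_keys(state_dict: dict) -> dict:
    """Translate pre-rename checkpoint keys to the naming used in this commit (illustrative)."""
    remapped = {}
    for key, tensor in state_dict.items():
        new_key = key
        # Only vision tower keys and the visual table are renamed in this commit;
        # language-model keys keep their original names.
        if ".vision_tower." in key or key.startswith("model.visual_table."):
            for pattern, replacement in RENAME_RULES:
                new_key = re.sub(pattern, replacement, new_key)
        remapped[new_key] = tensor
    return remapped
```
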